diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala
index aea7fa097d..c21c5a2553 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala
@@ -17,20 +17,22 @@ import scala.compat.java8.OptionConverters._
/**
* Representation of a Log Event issued by a [[akka.actor.typed.Behavior]]
*/
-final case class CapturedLogEvent(logLevel: LogLevel,
- message: String,
- cause: Option[Throwable],
- marker: Option[LogMarker],
- mdc: Map[String, Any]) {
+final case class CapturedLogEvent(
+ logLevel: LogLevel,
+ message: String,
+ cause: Option[Throwable],
+ marker: Option[LogMarker],
+ mdc: Map[String, Any]) {
/**
* Constructor for Java API
*/
- def this(logLevel: LogLevel,
- message: String,
- errorCause: Optional[Throwable],
- marker: Optional[LogMarker],
- mdc: java.util.Map[String, Any]) {
+ def this(
+ logLevel: LogLevel,
+ message: String,
+ errorCause: Optional[Throwable],
+ marker: Optional[LogMarker],
+ mdc: java.util.Map[String, Any]) {
this(logLevel, message, errorCause.asScala, marker.asScala, mdc.asScala.toMap)
}
@@ -88,11 +90,12 @@ object CapturedLogEvent {
* INTERNAL API
*/
@InternalApi
- private[akka] def apply(logLevel: LogLevel,
- message: String,
- errorCause: OptionVal[Throwable],
- logMarker: OptionVal[LogMarker],
- mdc: Map[String, Any]): CapturedLogEvent = {
+ private[akka] def apply(
+ logLevel: LogLevel,
+ message: String,
+ errorCause: OptionVal[Throwable],
+ logMarker: OptionVal[LogMarker],
+ mdc: Map[String, Any]): CapturedLogEvent = {
new CapturedLogEvent(logLevel, message, toOption(errorCause), toOption(logMarker), mdc)
}
}
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala
index 4f2a3f00f6..f495e3fdd6 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala
@@ -69,13 +69,15 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
override def isInfoEnabled(marker: LogMarker): Boolean = true
override def isDebugEnabled(marker: LogMarker): Boolean = true
- override private[akka] def notifyError(message: String,
- cause: OptionVal[Throwable],
- marker: OptionVal[LogMarker]): Unit =
+ override private[akka] def notifyError(
+ message: String,
+ cause: OptionVal[Throwable],
+ marker: OptionVal[LogMarker]): Unit =
logBuffer = CapturedLogEvent(Logging.ErrorLevel, message, cause, marker, mdc) :: logBuffer
- override private[akka] def notifyWarning(message: String,
- cause: OptionVal[Throwable],
- marker: OptionVal[LogMarker]): Unit =
+ override private[akka] def notifyWarning(
+ message: String,
+ cause: OptionVal[Throwable],
+ marker: OptionVal[LogMarker]): Unit =
logBuffer = CapturedLogEvent(Logging.WarningLevel, message, OptionVal.None, marker, mdc) :: logBuffer
override private[akka] def notifyInfo(message: String, marker: OptionVal[LogMarker]): Unit =
@@ -111,18 +113,20 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
override def isInfoEnabled(marker: LogMarker): Boolean = actual.isInfoEnabled(marker)
override def isDebugEnabled(marker: LogMarker): Boolean = actual.isDebugEnabled(marker)
- override private[akka] def notifyError(message: String,
- cause: OptionVal[Throwable],
- marker: OptionVal[LogMarker]): Unit = {
+ override private[akka] def notifyError(
+ message: String,
+ cause: OptionVal[Throwable],
+ marker: OptionVal[LogMarker]): Unit = {
val original = actual.mdc
actual.mdc = mdc
actual.notifyError(message, cause, marker)
actual.mdc = original
}
- override private[akka] def notifyWarning(message: String,
- cause: OptionVal[Throwable],
- marker: OptionVal[LogMarker]): Unit = {
+ override private[akka] def notifyWarning(
+ message: String,
+ cause: OptionVal[Throwable],
+ marker: OptionVal[LogMarker]): Unit = {
val original = actual.mdc
actual.mdc = mdc
actual.notifyWarning(message, cause, marker)
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala
index 1667fa252b..e7600e2f7b 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala
@@ -266,9 +266,10 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_])
override def fishForMessage(max: JDuration, fisher: java.util.function.Function[M, FishingOutcome]): JList[M] =
fishForMessage(max, "", fisher)
- override def fishForMessage(max: JDuration,
- hint: String,
- fisher: java.util.function.Function[M, FishingOutcome]): JList[M] =
+ override def fishForMessage(
+ max: JDuration,
+ hint: String,
+ fisher: java.util.function.Function[M, FishingOutcome]): JList[M] =
fishForMessage_internal(max.asScala.dilated, hint, fisher.apply).asJava
private def fishForMessage_internal(max: FiniteDuration, hint: String, fisher: M => FishingOutcome): List[M] = {
@@ -281,9 +282,10 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_])
try fisher(message)
catch {
case ex: MatchError =>
- throw new AssertionError(s"Unexpected message $message while fishing for messages, " +
- s"seen messages ${seen.reverse}, hint: $hint",
- ex)
+ throw new AssertionError(
+ s"Unexpected message $message while fishing for messages, " +
+ s"seen messages ${seen.reverse}, hint: $hint",
+ ex)
}
outcome match {
case FishingOutcome.Complete => (message :: seen).reverse
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala
index 9309423662..3adf7d2475 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala
@@ -44,8 +44,9 @@ final class TestKitJunitResource(_kit: ActorTestKit) extends ExternalResource {
*/
def this(customConfig: String) =
this(
- ActorTestKit.create(TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]),
- ConfigFactory.parseString(customConfig)))
+ ActorTestKit.create(
+ TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]),
+ ConfigFactory.parseString(customConfig)))
/**
* Use a custom config for the actor system.
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala
index 4e96a48913..39b005756d 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala
@@ -215,9 +215,10 @@ abstract class TestProbe[M] {
/**
* Same as the other `fishForMessage` but includes the provided hint in all error messages
*/
- def fishForMessage(max: Duration,
- hint: String,
- fisher: java.util.function.Function[M, FishingOutcome]): java.util.List[M]
+ def fishForMessage(
+ max: Duration,
+ hint: String,
+ fisher: java.util.function.Function[M, FishingOutcome]): java.util.List[M]
/**
* Expect the given actor to be stopped or stop within the given timeout or
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala
index d4f22f0a2a..49bde50abc 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala
@@ -29,9 +29,10 @@ object ActorTestKit {
* the testkit with [[ActorTestKit#shutdownTestKit]].
*/
def apply(): ActorTestKit =
- new ActorTestKit(name = TestKitUtils.testNameFromCallStack(classOf[ActorTestKit]),
- config = noConfigSet,
- settings = None)
+ new ActorTestKit(
+ name = TestKitUtils.testNameFromCallStack(classOf[ActorTestKit]),
+ config = noConfigSet,
+ settings = None)
/**
* Create a named testkit.
@@ -119,9 +120,10 @@ final class ActorTestKit private[akka] (val name: String, val config: Config, se
implicit val timeout: Timeout = testKitSettings.DefaultTimeout
def shutdownTestKit(): Unit = {
- ActorTestKit.shutdown(system,
- testKitSettings.DefaultActorSystemShutdownTimeout,
- testKitSettings.ThrowOnShutdownTimeout)
+ ActorTestKit.shutdown(
+ system,
+ testKitSettings.DefaultActorSystemShutdownTimeout,
+ testKitSettings.ThrowOnShutdownTimeout)
}
/**
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala
index 6293a4afa0..9fbed5b311 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala
@@ -251,14 +251,15 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout {
}
def check(looker: ActorRef): Unit = {
val lookname = looker.path.elements.mkString("", "/", "/")
- for ((l, r) <- Seq(LookupString("a/b/c") -> empty(lookname + "a/b/c"),
- LookupString("") -> system.deadLetters,
- LookupString("akka://all-systems/Nobody") -> system.deadLetters,
- LookupPath(system / "hallo") -> empty("user/hallo"),
- LookupPath(looker.path.child("hallo")) -> empty(lookname + "hallo"), // test Java API
- LookupPath(looker.path.descendant(Seq("a", "b").asJava)) -> empty(lookname + "a/b"), // test Java API
- LookupElems(Seq()) -> system.deadLetters,
- LookupElems(Seq("a")) -> empty(lookname + "a"))) checkOne(looker, l, r)
+ for ((l, r) <- Seq(
+ LookupString("a/b/c") -> empty(lookname + "a/b/c"),
+ LookupString("") -> system.deadLetters,
+ LookupString("akka://all-systems/Nobody") -> system.deadLetters,
+ LookupPath(system / "hallo") -> empty("user/hallo"),
+ LookupPath(looker.path.child("hallo")) -> empty(lookname + "hallo"), // test Java API
+ LookupPath(looker.path.descendant(Seq("a", "b").asJava)) -> empty(lookname + "a/b"), // test Java API
+ LookupElems(Seq()) -> system.deadLetters,
+ LookupElems(Seq("a")) -> empty(lookname + "a"))) checkOne(looker, l, r)
}
for (looker <- all) check(looker)
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala
index 3add8908d4..1ac9d17fcc 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala
@@ -189,20 +189,24 @@ object ActorMailboxSpec {
val UnboundedMailboxTypes = Seq(classOf[UnboundedMessageQueueSemantics])
val BoundedMailboxTypes = Seq(classOf[BoundedMessageQueueSemantics])
- val UnboundedDeqMailboxTypes = Seq(classOf[DequeBasedMessageQueueSemantics],
- classOf[UnboundedMessageQueueSemantics],
- classOf[UnboundedDequeBasedMessageQueueSemantics])
+ val UnboundedDeqMailboxTypes = Seq(
+ classOf[DequeBasedMessageQueueSemantics],
+ classOf[UnboundedMessageQueueSemantics],
+ classOf[UnboundedDequeBasedMessageQueueSemantics])
- val BoundedDeqMailboxTypes = Seq(classOf[DequeBasedMessageQueueSemantics],
- classOf[BoundedMessageQueueSemantics],
- classOf[BoundedDequeBasedMessageQueueSemantics])
+ val BoundedDeqMailboxTypes = Seq(
+ classOf[DequeBasedMessageQueueSemantics],
+ classOf[BoundedMessageQueueSemantics],
+ classOf[BoundedDequeBasedMessageQueueSemantics])
- val BoundedControlAwareMailboxTypes = Seq(classOf[BoundedMessageQueueSemantics],
- classOf[ControlAwareMessageQueueSemantics],
- classOf[BoundedControlAwareMessageQueueSemantics])
- val UnboundedControlAwareMailboxTypes = Seq(classOf[UnboundedMessageQueueSemantics],
- classOf[ControlAwareMessageQueueSemantics],
- classOf[UnboundedControlAwareMessageQueueSemantics])
+ val BoundedControlAwareMailboxTypes = Seq(
+ classOf[BoundedMessageQueueSemantics],
+ classOf[ControlAwareMessageQueueSemantics],
+ classOf[BoundedControlAwareMessageQueueSemantics])
+ val UnboundedControlAwareMailboxTypes = Seq(
+ classOf[UnboundedMessageQueueSemantics],
+ classOf[ControlAwareMessageQueueSemantics],
+ classOf[UnboundedControlAwareMessageQueueSemantics])
trait MCBoundedMessageQueueSemantics extends MessageQueue with MultipleConsumerSemantics
final case class MCBoundedMailbox(val capacity: Int, val pushTimeOut: FiniteDuration)
@@ -240,9 +244,10 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded deque message queue when it is only configured on the props" in {
- checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
- "default-override-from-props",
- UnboundedDeqMailboxTypes)
+ checkMailboxQueue(
+ Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+ "default-override-from-props",
+ UnboundedDeqMailboxTypes)
}
"get an bounded message queue when it's only configured with RequiresMailbox" in {
@@ -252,12 +257,14 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
"get an unbounded deque message queue when it's only mixed with Stash" in {
checkMailboxQueue(Props[StashQueueReportingActor], "default-override-from-stash", UnboundedDeqMailboxTypes)
checkMailboxQueue(Props(new StashQueueReportingActor), "default-override-from-stash2", UnboundedDeqMailboxTypes)
- checkMailboxQueue(Props(classOf[StashQueueReportingActorWithParams], 17, "hello"),
- "default-override-from-stash3",
- UnboundedDeqMailboxTypes)
- checkMailboxQueue(Props(new StashQueueReportingActorWithParams(17, "hello")),
- "default-override-from-stash4",
- UnboundedDeqMailboxTypes)
+ checkMailboxQueue(
+ Props(classOf[StashQueueReportingActorWithParams], 17, "hello"),
+ "default-override-from-stash3",
+ UnboundedDeqMailboxTypes)
+ checkMailboxQueue(
+ Props(new StashQueueReportingActorWithParams(17, "hello")),
+ "default-override-from-stash4",
+ UnboundedDeqMailboxTypes)
}
"get a bounded message queue when it's configured as mailbox" in {
@@ -273,21 +280,24 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded control aware message queue when it's configured as mailbox" in {
- checkMailboxQueue(Props[QueueReportingActor],
- "default-unbounded-control-aware",
- UnboundedControlAwareMailboxTypes)
+ checkMailboxQueue(
+ Props[QueueReportingActor],
+ "default-unbounded-control-aware",
+ UnboundedControlAwareMailboxTypes)
}
"get an bounded control aware message queue when it's only configured with RequiresMailbox" in {
- checkMailboxQueue(Props[BoundedControlAwareQueueReportingActor],
- "default-override-from-trait-bounded-control-aware",
- BoundedControlAwareMailboxTypes)
+ checkMailboxQueue(
+ Props[BoundedControlAwareQueueReportingActor],
+ "default-override-from-trait-bounded-control-aware",
+ BoundedControlAwareMailboxTypes)
}
"get an unbounded control aware message queue when it's only configured with RequiresMailbox" in {
- checkMailboxQueue(Props[UnboundedControlAwareQueueReportingActor],
- "default-override-from-trait-unbounded-control-aware",
- UnboundedControlAwareMailboxTypes)
+ checkMailboxQueue(
+ Props[UnboundedControlAwareQueueReportingActor],
+ "default-override-from-trait-unbounded-control-aware",
+ UnboundedControlAwareMailboxTypes)
}
"fail to create actor when an unbounded dequeu message queue is configured as mailbox overriding RequestMailbox" in {
@@ -313,9 +323,10 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get a bounded message queue with 0 push timeout when defined in dispatcher" in {
- val q = checkMailboxQueue(Props[QueueReportingActor],
- "default-bounded-mailbox-with-zero-pushtimeout",
- BoundedMailboxTypes)
+ val q = checkMailboxQueue(
+ Props[QueueReportingActor],
+ "default-bounded-mailbox-with-zero-pushtimeout",
+ BoundedMailboxTypes)
q.asInstanceOf[BoundedMessageQueueSemantics].pushTimeOut should ===(Duration.Zero)
}
@@ -324,15 +335,17 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded message queue overriding configuration on the props" in {
- checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
- "bounded-unbounded-override-props",
- UnboundedMailboxTypes)
+ checkMailboxQueue(
+ Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+ "bounded-unbounded-override-props",
+ UnboundedMailboxTypes)
}
"get a bounded deque-based message queue if configured and required" in {
- checkMailboxQueue(Props[StashQueueReportingActor],
- "bounded-deque-requirements-configured",
- BoundedDeqMailboxTypes)
+ checkMailboxQueue(
+ Props[StashQueueReportingActor],
+ "bounded-deque-requirements-configured",
+ BoundedDeqMailboxTypes)
}
"fail with a unbounded deque-based message queue if configured and required" in {
@@ -365,38 +378,44 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
"fail with a bounded deque-based message queue if not configured with Props" in {
intercept[ConfigurationException](
- system.actorOf(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
- "bounded-deque-require-unbounded-unconfigured-props"))
+ system.actorOf(
+ Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+ "bounded-deque-require-unbounded-unconfigured-props"))
}
"get a bounded deque-based message queue if configured and required with Props (dispatcher)" in {
- checkMailboxQueue(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
- "bounded-deque-requirements-configured-props-disp",
- BoundedDeqMailboxTypes)
+ checkMailboxQueue(
+ Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+ "bounded-deque-requirements-configured-props-disp",
+ BoundedDeqMailboxTypes)
}
"fail with a unbounded deque-based message queue if configured and required with Props (dispatcher)" in {
intercept[ConfigurationException](
- system.actorOf(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
- "bounded-deque-require-unbounded-configured-props-disp"))
+ system.actorOf(
+ Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+ "bounded-deque-require-unbounded-configured-props-disp"))
}
"fail with a bounded deque-based message queue if not configured with Props (dispatcher)" in {
intercept[ConfigurationException](
- system.actorOf(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
- "bounded-deque-require-unbounded-unconfigured-props-disp"))
+ system.actorOf(
+ Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+ "bounded-deque-require-unbounded-unconfigured-props-disp"))
}
"get a bounded deque-based message queue if configured and required with Props (mailbox)" in {
- checkMailboxQueue(Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.bounded-deque-based"),
- "bounded-deque-requirements-configured-props-mail",
- BoundedDeqMailboxTypes)
+ checkMailboxQueue(
+ Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.bounded-deque-based"),
+ "bounded-deque-requirements-configured-props-mail",
+ BoundedDeqMailboxTypes)
}
"fail with a unbounded deque-based message queue if configured and required with Props (mailbox)" in {
intercept[ConfigurationException](
- system.actorOf(Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
- "bounded-deque-require-unbounded-configured-props-mail"))
+ system.actorOf(
+ Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+ "bounded-deque-require-unbounded-configured-props-mail"))
}
"fail with a bounded deque-based message queue if not configured with Props (mailbox)" in {
@@ -405,21 +424,24 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded message queue with a balancing dispatcher" in {
- checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-dispatcher"),
- "unbounded-balancing",
- UnboundedMailboxTypes)
+ checkMailboxQueue(
+ Props[QueueReportingActor].withDispatcher("balancing-dispatcher"),
+ "unbounded-balancing",
+ UnboundedMailboxTypes)
}
"get a bounded message queue with a balancing bounded dispatcher" in {
- checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"),
- "bounded-balancing",
- BoundedMailboxTypes)
+ checkMailboxQueue(
+ Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"),
+ "bounded-balancing",
+ BoundedMailboxTypes)
}
"get a bounded message queue with a requiring balancing bounded dispatcher" in {
- checkMailboxQueue(Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"),
- "requiring-bounded-balancing",
- BoundedMailboxTypes)
+ checkMailboxQueue(
+ Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"),
+ "requiring-bounded-balancing",
+ BoundedMailboxTypes)
}
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
index 1083b0d2a4..82c88c0bd9 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
@@ -252,11 +252,12 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout {
}
def check(looker: ActorRef): Unit = {
val lookname = looker.path.elements.mkString("", "/", "/")
- for ((l, r) <- Seq(SelectString("a/b/c") -> None,
- SelectString("akka://all-systems/Nobody") -> None,
- SelectPath(system / "hallo") -> None,
- SelectPath(looker.path.child("hallo")) -> None, // test Java API
- SelectPath(looker.path.descendant(Seq("a", "b").asJava)) -> None) // test Java API
+ for ((l, r) <- Seq(
+ SelectString("a/b/c") -> None,
+ SelectString("akka://all-systems/Nobody") -> None,
+ SelectPath(system / "hallo") -> None,
+ SelectPath(looker.path.child("hallo")) -> None, // test Java API
+ SelectPath(looker.path.descendant(Seq("a", "b").asJava)) -> None) // test Java API
) checkOne(looker, l, r)
}
for (looker <- all) check(looker)
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
index dded2d38cd..0d3a8aa791 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
@@ -75,16 +75,18 @@ object ActorSystemSpec {
class SlowDispatcher(_config: Config, _prerequisites: DispatcherPrerequisites)
extends MessageDispatcherConfigurator(_config, _prerequisites) {
- private val instance = new Dispatcher(this,
- config.getString("id"),
- config.getInt("throughput"),
- config.getNanosDuration("throughput-deadline-time"),
- configureExecutor(),
- config.getMillisDuration("shutdown-timeout")) {
+ private val instance = new Dispatcher(
+ this,
+ config.getString("id"),
+ config.getInt("throughput"),
+ config.getNanosDuration("throughput-deadline-time"),
+ configureExecutor(),
+ config.getMillisDuration("shutdown-timeout")) {
val doneIt = new Switch
- override protected[akka] def registerForExecution(mbox: Mailbox,
- hasMessageHint: Boolean,
- hasSystemMessageHint: Boolean): Boolean = {
+ override protected[akka] def registerForExecution(
+ mbox: Mailbox,
+ hasMessageHint: Boolean,
+ hasSystemMessageHint: Boolean): Boolean = {
val ret = super.registerForExecution(mbox, hasMessageHint, hasSystemMessageHint)
doneIt.switchOn {
TestKit.awaitCond(mbox.actor.actor != null, 1.second)
@@ -134,14 +136,15 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
}
"reject invalid names" in {
- for (n <- Seq("-hallowelt",
- "_hallowelt",
- "hallo*welt",
- "hallo@welt",
- "hallo#welt",
- "hallo$welt",
- "hallo%welt",
- "hallo/welt")) intercept[IllegalArgumentException] {
+ for (n <- Seq(
+ "-hallowelt",
+ "_hallowelt",
+ "hallo*welt",
+ "hallo@welt",
+ "hallo#welt",
+ "hallo$welt",
+ "hallo%welt",
+ "hallo/welt")) intercept[IllegalArgumentException] {
ActorSystem(n)
}
}
@@ -163,8 +166,9 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
.info(pattern = """from Actor\[akka://LogDeadLetters/system/testProbe.*not delivered""", occurrences = 1)
.intercept {
EventFilter
- .warning(pattern = """received dead letter from Actor\[akka://LogDeadLetters/system/testProbe""",
- occurrences = 1)
+ .warning(
+ pattern = """received dead letter from Actor\[akka://LogDeadLetters/system/testProbe""",
+ occurrences = 1)
.intercept {
a.tell("boom", probe.ref)
}(sys)
@@ -321,10 +325,11 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
"allow configuration of guardian supervisor strategy" in {
implicit val system =
- ActorSystem("Stop",
- ConfigFactory
- .parseString("akka.actor.guardian-supervisor-strategy=akka.actor.StoppingSupervisorStrategy")
- .withFallback(AkkaSpec.testConf))
+ ActorSystem(
+ "Stop",
+ ConfigFactory
+ .parseString("akka.actor.guardian-supervisor-strategy=akka.actor.StoppingSupervisorStrategy")
+ .withFallback(AkkaSpec.testConf))
val a = system.actorOf(Props(new Actor {
def receive = {
case "die" => throw new Exception("hello")
@@ -343,10 +348,11 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
"shut down when /user escalates" in {
implicit val system =
- ActorSystem("Stop",
- ConfigFactory
- .parseString("akka.actor.guardian-supervisor-strategy=\"akka.actor.ActorSystemSpec$Strategy\"")
- .withFallback(AkkaSpec.testConf))
+ ActorSystem(
+ "Stop",
+ ConfigFactory
+ .parseString("akka.actor.guardian-supervisor-strategy=\"akka.actor.ActorSystemSpec$Strategy\"")
+ .withFallback(AkkaSpec.testConf))
val a = system.actorOf(Props(new Actor {
def receive = {
case "die" => throw new Exception("hello")
@@ -403,12 +409,13 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
}
"not allow top-level actor creation with custom guardian" in {
- val sys = new ActorSystemImpl("custom",
- ConfigFactory.defaultReference(),
- getClass.getClassLoader,
- None,
- Some(Props.empty),
- ActorSystemSetup.empty)
+ val sys = new ActorSystemImpl(
+ "custom",
+ ConfigFactory.defaultReference(),
+ getClass.getClassLoader,
+ None,
+ Some(Props.empty),
+ ActorSystemSetup.empty)
sys.start()
try {
intercept[UnsupportedOperationException] {
diff --git a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala
index ddd4e2f72b..5966c4c68d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala
@@ -107,18 +107,19 @@ class CoordinatedShutdownSpec
"have pre-defined phases from config" in {
import CoordinatedShutdown._
CoordinatedShutdown(system).orderedPhases should ===(
- List(PhaseBeforeServiceUnbind,
- PhaseServiceUnbind,
- PhaseServiceRequestsDone,
- PhaseServiceStop,
- PhaseBeforeClusterShutdown,
- PhaseClusterShardingShutdownRegion,
- PhaseClusterLeave,
- PhaseClusterExiting,
- PhaseClusterExitingDone,
- PhaseClusterShutdown,
- PhaseBeforeActorSystemTerminate,
- PhaseActorSystemTerminate))
+ List(
+ PhaseBeforeServiceUnbind,
+ PhaseServiceUnbind,
+ PhaseServiceRequestsDone,
+ PhaseServiceStop,
+ PhaseBeforeClusterShutdown,
+ PhaseClusterShardingShutdownRegion,
+ PhaseClusterLeave,
+ PhaseClusterExiting,
+ PhaseClusterExitingDone,
+ PhaseClusterShutdown,
+ PhaseBeforeActorSystemTerminate,
+ PhaseActorSystemTerminate))
}
"run ordered phases" in {
@@ -188,9 +189,10 @@ class CoordinatedShutdownSpec
"continue after timeout or failure" in {
import system.dispatcher
- val phases = Map("a" -> emptyPhase,
- "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = true, enabled = true),
- "c" -> phase("b", "a"))
+ val phases = Map(
+ "a" -> emptyPhase,
+ "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = true, enabled = true),
+ "c" -> phase("b", "a"))
val co = new CoordinatedShutdown(extSys, phases)
co.addTask("a", "a1") { () =>
testActor ! "A"
@@ -226,9 +228,10 @@ class CoordinatedShutdownSpec
}
"abort if recover=off" in {
- val phases = Map("a" -> emptyPhase,
- "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = true),
- "c" -> phase("b", "a"))
+ val phases = Map(
+ "a" -> emptyPhase,
+ "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = true),
+ "c" -> phase("b", "a"))
val co = new CoordinatedShutdown(extSys, phases)
co.addTask("b", "b1") { () =>
testActor ! "B"
@@ -247,9 +250,10 @@ class CoordinatedShutdownSpec
}
"skip tasks in disabled phase" in {
- val phases = Map("a" -> emptyPhase,
- "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = false),
- "c" -> phase("b", "a"))
+ val phases = Map(
+ "a" -> emptyPhase,
+ "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = false),
+ "c" -> phase("b", "a"))
val co = new CoordinatedShutdown(extSys, phases)
co.addTask("b", "b1") { () =>
testActor ! "B"
@@ -297,9 +301,10 @@ class CoordinatedShutdownSpec
}
}
""")) should ===(
- Map("a" -> Phase(dependsOn = Set.empty, timeout = 10.seconds, recover = true, enabled = true),
- "b" -> Phase(dependsOn = Set("a"), timeout = 15.seconds, recover = true, enabled = true),
- "c" -> Phase(dependsOn = Set("a", "b"), timeout = 10.seconds, recover = false, enabled = true)))
+ Map(
+ "a" -> Phase(dependsOn = Set.empty, timeout = 10.seconds, recover = true, enabled = true),
+ "b" -> Phase(dependsOn = Set("a"), timeout = 15.seconds, recover = true, enabled = true),
+ "c" -> Phase(dependsOn = Set("a", "b"), timeout = 10.seconds, recover = false, enabled = true)))
}
"default exit code to 0" in {
diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala
index 135a4a78f1..af46632fa4 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala
@@ -169,11 +169,12 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout =>
"fail a monitor which does not handle Terminated()" in {
filterEvents(EventFilter[ActorKilledException](), EventFilter[DeathPactException]()) {
val strategy = new OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider) {
- override def handleFailure(context: ActorContext,
- child: ActorRef,
- cause: Throwable,
- stats: ChildRestartStats,
- children: Iterable[ChildRestartStats]) = {
+ override def handleFailure(
+ context: ActorContext,
+ child: ActorRef,
+ cause: Throwable,
+ stats: ChildRestartStats,
+ children: Iterable[ChildRestartStats]) = {
testActor.tell(FF(Failed(child, cause, 0)), child)
super.handleFailure(context, child, cause, stats, children)
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala
index c8c8233f11..ab4db22020 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala
@@ -13,7 +13,8 @@ import akka.routing._
import scala.concurrent.duration._
object DeployerSpec {
- val deployerConf = ConfigFactory.parseString("""
+ val deployerConf = ConfigFactory.parseString(
+ """
akka.actor.deployment {
/service1 {
}
@@ -68,7 +69,7 @@ object DeployerSpec {
}
}
""",
- ConfigParseOptions.defaults)
+ ConfigParseOptions.defaults)
class RecipeActor extends Actor {
def receive = { case _ => }
@@ -85,12 +86,13 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
deployment should ===(
Some(
- Deploy(service,
- deployment.get.config,
- NoRouter,
- NoScopeGiven,
- Deploy.NoDispatcherGiven,
- Deploy.NoMailboxGiven)))
+ Deploy(
+ service,
+ deployment.get.config,
+ NoRouter,
+ NoScopeGiven,
+ Deploy.NoDispatcherGiven,
+ Deploy.NoMailboxGiven)))
}
"use None deployment for undefined service" in {
@@ -105,12 +107,13 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
deployment should ===(
Some(
- Deploy(service,
- deployment.get.config,
- NoRouter,
- NoScopeGiven,
- dispatcher = "my-dispatcher",
- Deploy.NoMailboxGiven)))
+ Deploy(
+ service,
+ deployment.get.config,
+ NoRouter,
+ NoScopeGiven,
+ dispatcher = "my-dispatcher",
+ Deploy.NoMailboxGiven)))
}
"be able to parse 'akka.actor.deployment._' with mailbox config" in {
@@ -119,18 +122,20 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
deployment should ===(
Some(
- Deploy(service,
- deployment.get.config,
- NoRouter,
- NoScopeGiven,
- Deploy.NoDispatcherGiven,
- mailbox = "my-mailbox")))
+ Deploy(
+ service,
+ deployment.get.config,
+ NoRouter,
+ NoScopeGiven,
+ Deploy.NoDispatcherGiven,
+ mailbox = "my-mailbox")))
}
"detect invalid number-of-instances" in {
intercept[com.typesafe.config.ConfigException.WrongType] {
val invalidDeployerConf = ConfigFactory
- .parseString("""
+ .parseString(
+ """
akka.actor.deployment {
/service-invalid-number-of-instances {
router = round-robin-pool
@@ -138,7 +143,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
}
}
""",
- ConfigParseOptions.defaults)
+ ConfigParseOptions.defaults)
.withFallback(AkkaSpec.testConf)
shutdown(ActorSystem("invalid-number-of-instances", invalidDeployerConf))
@@ -148,7 +153,8 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
"detect invalid deployment path" in {
val e = intercept[InvalidActorNameException] {
val invalidDeployerConf = ConfigFactory
- .parseString("""
+ .parseString(
+ """
akka.actor.deployment {
/gul/ubåt {
router = round-robin-pool
@@ -156,7 +162,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
}
}
""",
- ConfigParseOptions.defaults)
+ ConfigParseOptions.defaults)
.withFallback(AkkaSpec.testConf)
shutdown(ActorSystem("invalid-path", invalidDeployerConf))
@@ -182,9 +188,10 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
}
"be able to parse 'akka.actor.deployment._' with scatter-gather router" in {
- assertRouting("/service-scatter-gather",
- ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds),
- "/service-scatter-gather")
+ assertRouting(
+ "/service-scatter-gather",
+ ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds),
+ "/service-scatter-gather")
}
"be able to parse 'akka.actor.deployment._' with consistent-hashing router" in {
@@ -198,9 +205,10 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) {
"be able to use wildcards" in {
assertRouting("/some/wildcardmatch", RandomPool(1), "/some/*")
- assertRouting("/somewildcardmatch/some",
- ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds),
- "/*/some")
+ assertRouting(
+ "/somewildcardmatch/some",
+ ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds),
+ "/*/some")
}
"be able to use double wildcards" in {
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala
index 8a1009fcf6..608d08462c 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala
@@ -84,8 +84,9 @@ class ExtensionSpec extends WordSpec with Matchers {
"fail the actor system if an extension listed in akka.extensions fails to start" in {
intercept[RuntimeException] {
- val system = ActorSystem("failing",
- ConfigFactory.parseString("""
+ val system = ActorSystem(
+ "failing",
+ ConfigFactory.parseString("""
akka.extensions = ["akka.actor.FailingTestExtension"]
"""))
@@ -94,8 +95,9 @@ class ExtensionSpec extends WordSpec with Matchers {
}
"log an error if an extension listed in akka.extensions cannot be loaded" in {
- val system = ActorSystem("failing",
- ConfigFactory.parseString("""
+ val system = ActorSystem(
+ "failing",
+ ConfigFactory.parseString("""
akka.extensions = ["akka.actor.MissingExtension"]
"""))
EventFilter.error("While trying to load extension [akka.actor.MissingExtension], skipping.").intercept(())(system)
@@ -114,8 +116,9 @@ class ExtensionSpec extends WordSpec with Matchers {
"fail the actor system if a library-extension fails to start" in {
intercept[FailingTestExtension.TestException] {
- ActorSystem("failing",
- ConfigFactory.parseString("""
+ ActorSystem(
+ "failing",
+ ConfigFactory.parseString("""
akka.library-extensions += "akka.actor.FailingTestExtension"
""").withFallback(ConfigFactory.load()).resolve())
}
@@ -124,8 +127,9 @@ class ExtensionSpec extends WordSpec with Matchers {
"fail the actor system if a library-extension cannot be loaded" in {
intercept[RuntimeException] {
- ActorSystem("failing",
- ConfigFactory.parseString("""
+ ActorSystem(
+ "failing",
+ ConfigFactory.parseString("""
akka.library-extensions += "akka.actor.MissingExtension"
""").withFallback(ConfigFactory.load()))
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
index 2c73ea6782..fe421dbaaf 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
@@ -253,9 +253,10 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
"log events and transitions if asked to do so" in {
import scala.collection.JavaConverters._
val config = ConfigFactory
- .parseMap(Map("akka.loglevel" -> "DEBUG",
- "akka.actor.serialize-messages" -> "off",
- "akka.actor.debug.fsm" -> true).asJava)
+ .parseMap(Map(
+ "akka.loglevel" -> "DEBUG",
+ "akka.actor.serialize-messages" -> "off",
+ "akka.actor.debug.fsm" -> true).asJava)
.withFallback(system.settings.config)
val fsmEventSystem = ActorSystem("fsmEvent", config)
try {
diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala
index 4acdd62676..36d8c8282b 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala
@@ -109,8 +109,9 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
expectMsg(500 millis, Tick)
Thread.sleep(200) // this is ugly: need to wait for StateTimeout to be queued
resume(fsm)
- expectMsg(500 millis,
- Transition(fsm, TestCancelStateTimerInNamedTimerMessage, TestCancelStateTimerInNamedTimerMessage2))
+ expectMsg(
+ 500 millis,
+ Transition(fsm, TestCancelStateTimerInNamedTimerMessage, TestCancelStateTimerInNamedTimerMessage2))
fsm ! Cancel
within(500 millis) {
expectMsg(Cancel) // if this is not received, that means StateTimeout was not properly discarded
@@ -132,9 +133,10 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
"notify unhandled messages" taggedAs TimingTest in {
filterEvents(
EventFilter.warning("unhandled event Tick in state TestUnhandled", source = fsm.path.toString, occurrences = 1),
- EventFilter.warning("unhandled event Unhandled(test) in state TestUnhandled",
- source = fsm.path.toString,
- occurrences = 1)) {
+ EventFilter.warning(
+ "unhandled event Unhandled(test) in state TestUnhandled",
+ source = fsm.path.toString,
+ occurrences = 1)) {
fsm ! TestUnhandled
within(3 second) {
fsm ! Tick
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
index c8536bc1b5..e01c969128 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
@@ -63,13 +63,14 @@ object SupervisorHierarchySpec {
case object PongOfDeath
final case class Event(msg: Any, identity: Long) { val time: Long = System.nanoTime }
final case class ErrorLog(msg: String, log: Vector[Event])
- final case class Failure(directive: Directive,
- stop: Boolean,
- depth: Int,
- var failPre: Int,
- var failPost: Int,
- val failConstr: Int,
- stopKids: Int)
+ final case class Failure(
+ directive: Directive,
+ stop: Boolean,
+ depth: Int,
+ var failPre: Int,
+ var failPost: Int,
+ val failConstr: Int,
+ stopKids: Int)
extends RuntimeException("Failure")
with NoStackTrace {
override def toString = productPrefix + productIterator.mkString("(", ",", ")")
@@ -89,12 +90,13 @@ object SupervisorHierarchySpec {
extends DispatcherConfigurator(config, prerequisites) {
private val instance: MessageDispatcher =
- new Dispatcher(this,
- config.getString("id"),
- config.getInt("throughput"),
- config.getNanosDuration("throughput-deadline-time"),
- configureExecutor(),
- config.getMillisDuration("shutdown-timeout")) {
+ new Dispatcher(
+ this,
+ config.getString("id"),
+ config.getInt("throughput"),
+ config.getNanosDuration("throughput-deadline-time"),
+ configureExecutor(),
+ config.getMillisDuration("shutdown-timeout")) {
override def suspend(cell: ActorCell): Unit = {
cell.actor match {
@@ -517,17 +519,18 @@ object SupervisorHierarchySpec {
nextJob.next match {
case Ping(ref) => ref ! "ping"
case Fail(ref, dir) =>
- val f = Failure(dir,
- stop = random012 > 0,
- depth = random012,
- failPre = random012,
- failPost = random012,
- failConstr = random012,
- stopKids = random012 match {
- case 0 => 0
- case 1 => random.nextInt(breadth / 2)
- case 2 => 1000
- })
+ val f = Failure(
+ dir,
+ stop = random012 > 0,
+ depth = random012,
+ failPre = random012,
+ failPost = random012,
+ failConstr = random012,
+ stopKids = random012 match {
+ case 0 => 0
+ case 1 => random.nextInt(breadth / 2)
+ case 2 => 1000
+ })
ref ! f
}
if (idleChildren.nonEmpty) self ! Work
@@ -843,11 +846,12 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w
val preStartCalled = new AtomicInteger(0)
val postRestartCalled = new AtomicInteger(0)
- filterEvents(EventFilter[Failure](),
- EventFilter[ActorInitializationException](),
- EventFilter[IllegalArgumentException]("OH NO!"),
- EventFilter.error(start = "changing Recreate into Create"),
- EventFilter.error(start = "changing Resume into Create")) {
+ filterEvents(
+ EventFilter[Failure](),
+ EventFilter[ActorInitializationException](),
+ EventFilter[IllegalArgumentException]("OH NO!"),
+ EventFilter.error(start = "changing Recreate into Create"),
+ EventFilter.error(start = "changing Resume into Create")) {
val failResumer =
system.actorOf(
Props(new Actor {
@@ -892,14 +896,15 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w
"survive being stressed" taggedAs LongRunningTest in {
system.eventStream.publish(
- Mute(EventFilter[Failure](),
- EventFilter.warning("Failure"),
- EventFilter[ActorInitializationException](),
- EventFilter[NoSuchElementException]("head of empty list"),
- EventFilter.error(start = "changing Resume into Restart"),
- EventFilter.error(start = "changing Resume into Create"),
- EventFilter.error(start = "changing Recreate into Create"),
- EventFilter.warning(start = "received dead ")))
+ Mute(
+ EventFilter[Failure](),
+ EventFilter.warning("Failure"),
+ EventFilter[ActorInitializationException](),
+ EventFilter[NoSuchElementException]("head of empty list"),
+ EventFilter.error(start = "changing Resume into Restart"),
+ EventFilter.error(start = "changing Resume into Create"),
+ EventFilter.error(start = "changing Recreate into Create"),
+ EventFilter.warning(start = "received dead ")))
val fsm = system.actorOf(Props(new StressTest(testActor, size = 500, breadth = 6)), "stressTest")
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala
index 1248d8134d..4f68cd3156 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala
@@ -132,9 +132,10 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul
"be able to create a similar kid in the fault handling strategy" in {
val parent = system.actorOf(Props(new Actor {
override val supervisorStrategy = new OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider) {
- override def handleChildTerminated(context: ActorContext,
- child: ActorRef,
- children: Iterable[ActorRef]): Unit = {
+ override def handleChildTerminated(
+ context: ActorContext,
+ child: ActorRef,
+ children: Iterable[ActorRef]): Unit = {
val newKid = context.actorOf(Props.empty, child.path.name)
testActor ! { if ((newKid ne child) && newKid.path == child.path) "green" else "red" }
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala
index 03c66eb4c6..e3478d7605 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala
@@ -417,9 +417,10 @@ class SupervisorSpec
supervisor ! dyingProps
val dyingActor = expectMsgType[ActorRef]
- filterEvents(EventFilter[RuntimeException]("Expected", occurrences = 1),
- EventFilter[PreRestartException]("Don't wanna!", occurrences = 1),
- EventFilter[PostRestartException]("Don't wanna!", occurrences = 1)) {
+ filterEvents(
+ EventFilter[RuntimeException]("Expected", occurrences = 1),
+ EventFilter[PreRestartException]("Don't wanna!", occurrences = 1),
+ EventFilter[PostRestartException]("Don't wanna!", occurrences = 1)) {
intercept[RuntimeException] {
Await.result(dyingActor.?(DieReply)(DilatedTimeout), DilatedTimeout)
}
@@ -468,8 +469,9 @@ class SupervisorSpec
parent ! latch
parent ! "testchildAndAck"
expectMsg("ack")
- filterEvents(EventFilter[IllegalStateException]("OHNOES", occurrences = 1),
- EventFilter.warning(pattern = "dead.*test", occurrences = 1)) {
+ filterEvents(
+ EventFilter[IllegalStateException]("OHNOES", occurrences = 1),
+ EventFilter.warning(pattern = "dead.*test", occurrences = 1)) {
latch.countDown()
}
expectMsg("parent restarted")
diff --git a/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala
index 12a342cdc0..776796f73b 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala
@@ -149,19 +149,21 @@ object TimerSpec {
class TimerSpec extends AbstractTimerSpec {
override def testName: String = "Timers"
- override def target(monitor: ActorRef,
- interval: FiniteDuration,
- repeat: Boolean,
- initial: () => Int = () => 1): Props =
+ override def target(
+ monitor: ActorRef,
+ interval: FiniteDuration,
+ repeat: Boolean,
+ initial: () => Int = () => 1): Props =
TimerSpec.target(monitor, interval, repeat, initial)
}
class FsmTimerSpec extends AbstractTimerSpec {
override def testName: String = "FSM Timers"
- override def target(monitor: ActorRef,
- interval: FiniteDuration,
- repeat: Boolean,
- initial: () => Int = () => 1): Props =
+ override def target(
+ monitor: ActorRef,
+ interval: FiniteDuration,
+ repeat: Boolean,
+ initial: () => Int = () => 1): Props =
TimerSpec.fsmTarget(monitor, interval, repeat, initial)
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
index e597909b8a..328e129672 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
@@ -393,8 +393,9 @@ class TypedActorSpec
case p: TypedProps[_] => context.sender() ! TypedActor(context).typedActorOf(p)
}
}))
- val t = Await.result((boss ? TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(2 seconds)).mapTo[Foo],
- timeout.duration)
+ val t = Await.result(
+ (boss ? TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(2 seconds)).mapTo[Foo],
+ timeout.duration)
t.incr()
t.failingPigdog()
diff --git a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
index 66a255880c..ff981ab7d0 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
@@ -18,9 +18,10 @@ object UidClashTest {
@volatile var oldActor: ActorRef = _
- private[akka] class EvilCollidingActorRef(override val provider: ActorRefProvider,
- override val path: ActorPath,
- val eventStream: EventStream)
+ private[akka] class EvilCollidingActorRef(
+ override val provider: ActorRefProvider,
+ override val path: ActorPath,
+ val eventStream: EventStream)
extends MinimalActorRef {
//Ignore everything
diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
index d1cad439f9..ad4edfa04d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
@@ -172,11 +172,12 @@ object ActorModelSpec {
} catch {
case e: Throwable =>
system.eventStream.publish(
- Error(e,
- dispatcher.toString,
- dispatcher.getClass,
- "actual: stops=" + dispatcher.stops.get +
- " required: stops=" + stops))
+ Error(
+ e,
+ dispatcher.toString,
+ dispatcher.getClass,
+ "actual: stops=" + dispatcher.stops.get +
+ " required: stops=" + stops))
throw e
}
}
@@ -213,9 +214,10 @@ object ActorModelSpec {
msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(),
msgsProcessed: Long = statsFor(actorRef, dispatcher).msgsProcessed.get(),
restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem): Unit = {
- val stats = statsFor(actorRef,
- Option(dispatcher).getOrElse(
- actorRef.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].dispatcher))
+ val stats = statsFor(
+ actorRef,
+ Option(dispatcher).getOrElse(
+ actorRef.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].dispatcher))
val deadline = System.currentTimeMillis + 1000
try {
await(deadline)(stats.suspensions.get() == suspensions)
@@ -228,12 +230,13 @@ object ActorModelSpec {
} catch {
case e: Throwable =>
system.eventStream.publish(
- Error(e,
- Option(dispatcher).toString,
- Option(dispatcher).getOrElse(this).getClass,
- "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions +
- ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters +
- ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts))
+ Error(
+ e,
+ Option(dispatcher).toString,
+ Option(dispatcher).getOrElse(this).getClass,
+ "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions +
+ ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters +
+ ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts))
throw e
}
}
@@ -276,13 +279,14 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa
assertDispatcher(dispatcher)(stops = 0)
system.stop(a)
assertDispatcher(dispatcher)(stops = 1)
- assertRef(a, dispatcher)(suspensions = 0,
- resumes = 0,
- registers = 1,
- unregisters = 1,
- msgsReceived = 0,
- msgsProcessed = 0,
- restarts = 0)
+ assertRef(a, dispatcher)(
+ suspensions = 0,
+ resumes = 0,
+ registers = 1,
+ unregisters = 1,
+ msgsReceived = 0,
+ msgsProcessed = 0,
+ restarts = 0)
for (i <- 1 to 10) yield Future { i }
assertDispatcher(dispatcher)(stops = 2)
@@ -359,12 +363,13 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa
assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, msgsProcessed = 1, suspensions = 1, resumes = 1)
system.stop(a)
- assertRefDefaultZero(a)(registers = 1,
- unregisters = 1,
- msgsReceived = 1,
- msgsProcessed = 1,
- suspensions = 1,
- resumes = 1)
+ assertRefDefaultZero(a)(
+ registers = 1,
+ unregisters = 1,
+ msgsReceived = 1,
+ msgsProcessed = 1,
+ suspensions = 1,
+ resumes = 1)
}
"handle waves of actors" in {
@@ -432,9 +437,10 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa
}
"continue to process messages when a thread gets interrupted and throws an exception" in {
- filterEvents(EventFilter[InterruptedException](),
- EventFilter[ActorInterruptedException](),
- EventFilter[akka.event.Logging.LoggerException]()) {
+ filterEvents(
+ EventFilter[InterruptedException](),
+ EventFilter[ActorInterruptedException](),
+ EventFilter[akka.event.Logging.LoggerException]()) {
implicit val dispatcher = interceptedDispatcher()
val a = newTestActor(dispatcher.id)
val f1 = a ? Reply("foo")
@@ -541,12 +547,13 @@ object DispatcherModelSpec {
import akka.util.Helpers.ConfigOps
private val instance: MessageDispatcher =
- new Dispatcher(this,
- config.getString("id"),
- config.getInt("throughput"),
- config.getNanosDuration("throughput-deadline-time"),
- configureExecutor(),
- config.getMillisDuration("shutdown-timeout")) with MessageDispatcherInterceptor
+ new Dispatcher(
+ this,
+ config.getString("id"),
+ config.getInt("throughput"),
+ config.getNanosDuration("throughput-deadline-time"),
+ configureExecutor(),
+ config.getMillisDuration("shutdown-timeout")) with MessageDispatcherInterceptor
override def dispatcher(): MessageDispatcher = instance
}
@@ -617,14 +624,15 @@ object BalancingDispatcherModelSpec {
import akka.util.Helpers.ConfigOps
override protected def create(mailboxType: MailboxType): BalancingDispatcher =
- new BalancingDispatcher(this,
- config.getString("id"),
- config.getInt("throughput"),
- config.getNanosDuration("throughput-deadline-time"),
- mailboxType,
- configureExecutor(),
- config.getMillisDuration("shutdown-timeout"),
- config.getBoolean("attempt-teamwork")) with MessageDispatcherInterceptor
+ new BalancingDispatcher(
+ this,
+ config.getString("id"),
+ config.getInt("throughput"),
+ config.getNanosDuration("throughput-deadline-time"),
+ mailboxType,
+ configureExecutor(),
+ config.getMillisDuration("shutdown-timeout"),
+ config.getBoolean("attempt-teamwork")) with MessageDispatcherInterceptor
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
index 86aee39be0..c02dbb8198 100644
--- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
@@ -126,10 +126,11 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn
q.hasMessages should ===(false)
}
- def testEnqueueDequeue(config: MailboxType,
- enqueueN: Int = 10000,
- dequeueN: Int = 10000,
- parallel: Boolean = true): Unit = within(10 seconds) {
+ def testEnqueueDequeue(
+ config: MailboxType,
+ enqueueN: Int = 10000,
+ dequeueN: Int = 10000,
+ parallel: Boolean = true): Unit = within(10 seconds) {
val q = factory(config)
ensureInitialMailboxState(config, q)
diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala
index aaeeef172a..fae9f2b9bc 100644
--- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala
@@ -109,10 +109,12 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) {
sys.eventStream.subscribe(testActor, classOf[AnyRef])
val m = UnhandledMessage(42, sys.deadLetters, sys.deadLetters)
sys.eventStream.publish(m)
- expectMsgAllOf(m,
- Logging.Debug(sys.deadLetters.path.toString,
- sys.deadLetters.getClass,
- "unhandled message from " + sys.deadLetters + ": 42"))
+ expectMsgAllOf(
+ m,
+ Logging.Debug(
+ sys.deadLetters.path.toString,
+ sys.deadLetters.getClass,
+ "unhandled message from " + sys.deadLetters + ": 42"))
sys.eventStream.unsubscribe(testActor)
} finally {
shutdown(sys)
diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala
index de8fba3ad5..a4e0ddff89 100644
--- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala
@@ -73,10 +73,12 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll {
})
}))
a ! "hallo"
- expectMsg(1 second,
- Logging.Debug("funky",
- classOf[DummyClassForStringSources],
- "received unhandled message hallo from " + system.deadLetters))
+ expectMsg(
+ 1 second,
+ Logging.Debug(
+ "funky",
+ classOf[DummyClassForStringSources],
+ "received unhandled message hallo from " + system.deadLetters))
expectMsgType[UnhandledMessage](1 second)
}
}
@@ -272,9 +274,10 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll {
}
system.stop(supervisor)
- expectMsgAllOf(Logging.Debug(aname, aclass, "stopped"),
- Logging.Debug(sname, sclass, "stopping"),
- Logging.Debug(sname, sclass, "stopped"))
+ expectMsgAllOf(
+ Logging.Debug(aname, aclass, "stopped"),
+ Logging.Debug(sname, sclass, "stopping"),
+ Logging.Debug(sname, sclass, "stopped"))
}
def expectMsgAllPF(messages: Int)(matchers: PartialFunction[AnyRef, Int]): Set[Int] = {
@@ -285,9 +288,10 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll {
else if (gotMatching.size == messages) gotMatching
else {
val msg = receiveOne(remainingOrDefault)
- assert(msg ne null,
- s"timeout ($max) during expectMsgAllPF, got matching " +
- s"[${gotMatching.mkString(", ")}], got unknown: [${unknown.mkString(", ")}]")
+ assert(
+ msg ne null,
+ s"timeout ($max) during expectMsgAllPF, got matching " +
+ s"[${gotMatching.mkString(", ")}], got unknown: [${unknown.mkString(", ")}]")
if (matchers.isDefinedAt(msg)) receiveNMatching(gotMatching + matchers(msg), Vector.empty)
else receiveNMatching(gotMatching, unknown :+ msg) // unknown message, just ignore
}
diff --git a/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala b/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala
index 5e06b9e45b..5dc006f866 100644
--- a/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala
@@ -9,10 +9,11 @@ import akka.testkit._
class MarkerLoggingSpec extends AkkaSpec with ImplicitSender {
"A MarkerLoggerAdapter" should {
- val markerLogging = new MarkerLoggingAdapter(system.eventStream,
- getClass.getName,
- this.getClass,
- new DefaultLoggingFilter(() => Logging.InfoLevel))
+ val markerLogging = new MarkerLoggingAdapter(
+ system.eventStream,
+ getClass.getName,
+ this.getClass,
+ new DefaultLoggingFilter(() => Logging.InfoLevel))
"add markers to logging" in {
system.eventStream.subscribe(self, classOf[Info])
diff --git a/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala
index 9fe9d64715..27f33c42a5 100644
--- a/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala
@@ -89,9 +89,10 @@ class InetAddressDnsResolverSpec extends AkkaSpec("""
private def dnsResolver = {
val actorRef = TestActorRef[InetAddressDnsResolver](
- Props(classOf[InetAddressDnsResolver],
- new SimpleDnsCache(),
- system.settings.config.getConfig("akka.io.dns.inet-address")))
+ Props(
+ classOf[InetAddressDnsResolver],
+ new SimpleDnsCache(),
+ system.settings.config.getConfig("akka.io.dns.inet-address")))
actorRef.underlyingActor
}
@@ -136,9 +137,10 @@ class InetAddressDnsResolverConfigSpec extends AkkaSpec("""
private def dnsResolver = {
val actorRef = TestActorRef[InetAddressDnsResolver](
- Props(classOf[InetAddressDnsResolver],
- new SimpleDnsCache(),
- system.settings.config.getConfig("akka.io.dns.inet-address")))
+ Props(
+ classOf[InetAddressDnsResolver],
+ new SimpleDnsCache(),
+ system.settings.config.getConfig("akka.io.dns.inet-address")))
actorRef.underlyingActor
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala
index 4e929c1a89..38c85d3d7c 100644
--- a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala
@@ -912,10 +912,11 @@ class TcpConnectionSpec extends AkkaSpec("""
def setServerSocketOptions() = ()
- def createConnectionActor(serverAddress: InetSocketAddress = serverAddress,
- options: immutable.Seq[SocketOption] = Nil,
- timeout: Option[FiniteDuration] = None,
- pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = {
+ def createConnectionActor(
+ serverAddress: InetSocketAddress = serverAddress,
+ options: immutable.Seq[SocketOption] = Nil,
+ timeout: Option[FiniteDuration] = None,
+ pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = {
val ref = createConnectionActorWithoutRegistration(serverAddress, options, timeout, pullMode)
ref ! newChannelRegistration
ref
@@ -930,15 +931,17 @@ class TcpConnectionSpec extends AkkaSpec("""
protected def onCancelAndClose(andThen: () => Unit): Unit = andThen()
- def createConnectionActorWithoutRegistration(serverAddress: InetSocketAddress = serverAddress,
- options: immutable.Seq[SocketOption] = Nil,
- timeout: Option[FiniteDuration] = None,
- pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] =
+ def createConnectionActorWithoutRegistration(
+ serverAddress: InetSocketAddress = serverAddress,
+ options: immutable.Seq[SocketOption] = Nil,
+ timeout: Option[FiniteDuration] = None,
+ pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] =
TestActorRef(
- new TcpOutgoingConnection(Tcp(system),
- this,
- userHandler.ref,
- Connect(serverAddress, options = options, timeout = timeout, pullMode = pullMode)) {
+ new TcpOutgoingConnection(
+ Tcp(system),
+ this,
+ userHandler.ref,
+ Connect(serverAddress, options = options, timeout = timeout, pullMode = pullMode)) {
override def postRestart(reason: Throwable): Unit = context.stop(self) // ensure we never restart
})
}
@@ -960,9 +963,10 @@ class TcpConnectionSpec extends AkkaSpec("""
}
}
- abstract class EstablishedConnectionTest(keepOpenOnPeerClosed: Boolean = false,
- useResumeWriting: Boolean = true,
- pullMode: Boolean = false)
+ abstract class EstablishedConnectionTest(
+ keepOpenOnPeerClosed: Boolean = false,
+ useResumeWriting: Boolean = true,
+ pullMode: Boolean = false)
extends UnacceptedConnectionTest(pullMode) {
// lazy init since potential exceptions should not be triggered in the constructor but during execution of `run`
@@ -1054,9 +1058,10 @@ class TcpConnectionSpec extends AkkaSpec("""
/**
* Tries to simultaneously act on client and server side to read from the server all pending data from the client.
*/
- @tailrec final def pullFromServerSide(remaining: Int,
- remainingTries: Int = 1000,
- into: ByteBuffer = defaultbuffer): Unit =
+ @tailrec final def pullFromServerSide(
+ remaining: Int,
+ remainingTries: Int = 1000,
+ into: ByteBuffer = defaultbuffer): Unit =
if (remainingTries <= 0)
throw new AssertionError("Pulling took too many loops, remaining data: " + remaining)
else if (remaining > 0) {
@@ -1109,11 +1114,10 @@ class TcpConnectionSpec extends AkkaSpec("""
def selectedAs(interest: Int, duration: Duration): BeMatcher[SelectionKey] =
new BeMatcher[SelectionKey] {
def apply(key: SelectionKey) =
- MatchResult(checkFor(key, interest, duration.toMillis.toInt),
- "%s key was not selected for %s after %s".format(key.attachment(),
- interestsDesc(interest),
- duration),
- "%s key was selected for %s after %s".format(key.attachment(), interestsDesc(interest), duration))
+ MatchResult(
+ checkFor(key, interest, duration.toMillis.toInt),
+ "%s key was not selected for %s after %s".format(key.attachment(), interestsDesc(interest), duration),
+ "%s key was selected for %s after %s".format(key.attachment(), interestsDesc(interest), duration))
}
val interestsNames =
diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala
index 1b9a67a29d..3e98b205f5 100644
--- a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala
@@ -195,11 +195,12 @@ class TcpIntegrationSpec extends AkkaSpec("""
}
}
- def chitchat(clientHandler: TestProbe,
- clientConnection: ActorRef,
- serverHandler: TestProbe,
- serverConnection: ActorRef,
- rounds: Int = 100) = {
+ def chitchat(
+ clientHandler: TestProbe,
+ clientConnection: ActorRef,
+ serverHandler: TestProbe,
+ serverConnection: ActorRef,
+ rounds: Int = 100) = {
val testData = ByteString(0)
(1 to rounds).foreach { _ =>
diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala
index 477d89e266..de7a808fb6 100644
--- a/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala
@@ -175,12 +175,13 @@ class TcpListenerSpec extends AkkaSpec("""
private class ListenerParent(pullMode: Boolean) extends Actor with ChannelRegistry {
val listener = context.actorOf(
- props = Props(classOf[TcpListener],
- selectorRouter.ref,
- Tcp(system),
- this,
- bindCommander.ref,
- Bind(handler.ref, endpoint, 100, Nil, pullMode)).withDeploy(Deploy.local),
+ props = Props(
+ classOf[TcpListener],
+ selectorRouter.ref,
+ Tcp(system),
+ this,
+ bindCommander.ref,
+ Bind(handler.ref, endpoint, 100, Nil, pullMode)).withDeploy(Deploy.local),
name = "test-listener-" + counter.next())
parent.watch(listener)
def receive: Receive = {
diff --git a/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala
index b2a4bf93fe..62f3827a14 100644
--- a/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala
@@ -24,9 +24,10 @@ class UdpConnectedIntegrationSpec extends AkkaSpec("""
commander.sender()
}
- def connectUdp(localAddress: Option[InetSocketAddress],
- remoteAddress: InetSocketAddress,
- handler: ActorRef): ActorRef = {
+ def connectUdp(
+ localAddress: Option[InetSocketAddress],
+ remoteAddress: InetSocketAddress,
+ handler: ActorRef): ActorRef = {
val commander = TestProbe()
commander.send(IO(UdpConnected), UdpConnected.Connect(handler, remoteAddress, localAddress, Nil))
commander.expectMsg(UdpConnected.Connected)
diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala
index 22ccdc954c..3ec8289716 100644
--- a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala
@@ -61,8 +61,9 @@ class AsyncDnsResolverIntegrationSpec extends AkkaSpec(s"""
val name = "a-double.foo.test"
val answer = resolve(name)
answer.name shouldEqual name
- answer.records.map(_.asInstanceOf[ARecord].ip).toSet shouldEqual Set(InetAddress.getByName("192.168.1.21"),
- InetAddress.getByName("192.168.1.22"))
+ answer.records.map(_.asInstanceOf[ARecord].ip).toSet shouldEqual Set(
+ InetAddress.getByName("192.168.1.21"),
+ InetAddress.getByName("192.168.1.22"))
}
"resolve single AAAA record" in {
@@ -87,8 +88,9 @@ class AsyncDnsResolverIntegrationSpec extends AkkaSpec(s"""
val answer = resolve(name)
answer.name shouldEqual name
- answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.1.23"),
- InetAddress.getByName("192.168.1.24"))
+ answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(
+ InetAddress.getByName("192.168.1.23"),
+ InetAddress.getByName("192.168.1.24"))
answer.records.collect { case r: AAAARecord => r.ip }.toSet shouldEqual Set(
InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:4"),
@@ -108,8 +110,9 @@ class AsyncDnsResolverIntegrationSpec extends AkkaSpec(s"""
val answer = resolve(name)
answer.name shouldEqual name
answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set("a-double.foo.test")
- answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.1.21"),
- InetAddress.getByName("192.168.1.22"))
+ answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(
+ InetAddress.getByName("192.168.1.21"),
+ InetAddress.getByName("192.168.1.22"))
}
"resolve SRV record" in {
diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala
index 531b576433..55bb14bfa3 100644
--- a/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala
@@ -17,8 +17,9 @@ class DnsSettingsSpec extends AkkaSpec {
"DNS settings" must {
"use host servers if set to default" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = "default"
resolve-timeout = 1s
search-domains = []
@@ -30,8 +31,9 @@ class DnsSettingsSpec extends AkkaSpec {
}
"parse a single name server" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = "127.0.0.1"
resolve-timeout = 1s
search-domains = []
@@ -42,21 +44,24 @@ class DnsSettingsSpec extends AkkaSpec {
}
"parse a list of name servers" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = ["127.0.0.1", "127.0.0.2"]
resolve-timeout = 1s
search-domains = []
ndots = 1
"""))
- dnsSettings.NameServers.map(_.getAddress) shouldEqual List(InetAddress.getByName("127.0.0.1"),
- InetAddress.getByName("127.0.0.2"))
+ dnsSettings.NameServers.map(_.getAddress) shouldEqual List(
+ InetAddress.getByName("127.0.0.1"),
+ InetAddress.getByName("127.0.0.2"))
}
"use host search domains if set to default" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = "127.0.0.1"
resolve-timeout = 1s
search-domains = "default"
@@ -68,8 +73,9 @@ class DnsSettingsSpec extends AkkaSpec {
}
"parse a single search domain" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = "127.0.0.1"
resolve-timeout = 1s
search-domains = "example.com"
@@ -80,8 +86,9 @@ class DnsSettingsSpec extends AkkaSpec {
}
"parse a single list of search domains" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = "127.0.0.1"
resolve-timeout = 1s
search-domains = [ "example.com", "example.net" ]
@@ -92,8 +99,9 @@ class DnsSettingsSpec extends AkkaSpec {
}
"use host ndots if set to default" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = "127.0.0.1"
resolve-timeout = 1s
search-domains = "example.com"
@@ -105,8 +113,9 @@ class DnsSettingsSpec extends AkkaSpec {
}
"parse ndots" in {
- val dnsSettings = new DnsSettings(eas,
- ConfigFactory.parseString("""
+ val dnsSettings = new DnsSettings(
+ eas,
+ ConfigFactory.parseString("""
nameservers = "127.0.0.1"
resolve-timeout = 1s
search-domains = "example.com"
diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala b/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala
index 3339f7555c..74ff325b18 100644
--- a/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala
@@ -46,8 +46,9 @@ trait DockerBindDnsService extends Eventually { self: AkkaSpec =>
.hostConfig(
HostConfig
.builder()
- .portBindings(Map("53/tcp" -> List(PortBinding.of("", hostPort)).asJava,
- "53/udp" -> List(PortBinding.of("", hostPort)).asJava).asJava)
+ .portBindings(Map(
+ "53/tcp" -> List(PortBinding.of("", hostPort)).asJava,
+ "53/udp" -> List(PortBinding.of("", hostPort)).asJava).asJava)
.binds(HostConfig.Bind
.from(new java.io.File("akka-actor-tests/src/test/bind/").getAbsolutePath)
.to("/data/bind")
diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala
index 9d3b9fdd6e..d1e82f57b9 100644
--- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala
@@ -134,8 +134,9 @@ class AsyncDnsResolverSpec extends AkkaSpec("""
}
def resolver(clients: List[ActorRef]): ActorRef = {
- val settings = new DnsSettings(system.asInstanceOf[ExtendedActorSystem],
- ConfigFactory.parseString("""
+ val settings = new DnsSettings(
+ system.asInstanceOf[ExtendedActorSystem],
+ ConfigFactory.parseString("""
nameservers = ["one","two"]
resolve-timeout = 300ms
search-domains = []
diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
index d96a22f799..2a3f88732f 100644
--- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
@@ -143,8 +143,9 @@ class AskSpec extends AkkaSpec {
val deadListener = TestProbe()
system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter])
- val echo = system.actorOf(Props(new Actor { def receive = { case x => context.actorSelection("/temp/*") ! x } }),
- "select-echo3")
+ val echo = system.actorOf(
+ Props(new Actor { def receive = { case x => context.actorSelection("/temp/*") ! x } }),
+ "select-echo3")
val f = echo ? "hi"
intercept[AskTimeoutException] {
Await.result(f, 1 seconds)
diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala
index 3b4bf069b7..de2851f548 100644
--- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala
@@ -141,12 +141,13 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec with ImplicitSender {
"accept commands while child is terminating" in {
val postStopLatch = new CountDownLatch(1)
val options = Backoff
- .onFailure(Props(new SlowlyFailingActor(postStopLatch)),
- "someChildName",
- 1 nanos,
- 1 nanos,
- 0.0,
- maxNrOfRetries = -1)
+ .onFailure(
+ Props(new SlowlyFailingActor(postStopLatch)),
+ "someChildName",
+ 1 nanos,
+ 1 nanos,
+ 0.0,
+ maxNrOfRetries = -1)
.withSupervisorStrategy(OneForOneStrategy(loggingEnabled = false) {
case _: TestActor.StoppingException => SupervisorStrategy.Stop
})
diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala
index dcb7847395..66a189b96e 100644
--- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala
@@ -222,20 +222,22 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually
"correctly calculate the delay" in {
val delayTable =
- Table(("restartCount", "minBackoff", "maxBackoff", "randomFactor", "expectedResult"),
- (0, 0.minutes, 0.minutes, 0d, 0.minutes),
- (0, 5.minutes, 7.minutes, 0d, 5.minutes),
- (2, 5.seconds, 7.seconds, 0d, 7.seconds),
- (2, 5.seconds, 7.days, 0d, 20.seconds),
- (29, 5.minutes, 10.minutes, 0d, 10.minutes),
- (29, 10000.days, 10000.days, 0d, 10000.days),
- (Int.MaxValue, 10000.days, 10000.days, 0d, 10000.days))
+ Table(
+ ("restartCount", "minBackoff", "maxBackoff", "randomFactor", "expectedResult"),
+ (0, 0.minutes, 0.minutes, 0d, 0.minutes),
+ (0, 5.minutes, 7.minutes, 0d, 5.minutes),
+ (2, 5.seconds, 7.seconds, 0d, 7.seconds),
+ (2, 5.seconds, 7.days, 0d, 20.seconds),
+ (29, 5.minutes, 10.minutes, 0d, 10.minutes),
+ (29, 10000.days, 10000.days, 0d, 10000.days),
+ (Int.MaxValue, 10000.days, 10000.days, 0d, 10000.days))
forAll(delayTable) {
- (restartCount: Int,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- expectedResult: FiniteDuration) =>
+ (
+ restartCount: Int,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ expectedResult: FiniteDuration) =>
val calculatedValue = BackoffSupervisor.calculateDelay(restartCount, minBackoff, maxBackoff, randomFactor)
assert(calculatedValue === expectedResult)
}
diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala
index 7b6aa86cb0..9173805532 100644
--- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala
@@ -670,9 +670,10 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter with MockitoSugar
val breaker: CircuitBreakerSpec.Breaker = CircuitBreakerSpec.multiFailureCb()
for (_ <- 1 to 4) breaker().withCircuitBreaker(Future(throwException))
- awaitCond(breaker().currentFailureCount == 4,
- awaitTimeout,
- message = s"Current failure count: ${breaker().currentFailureCount}")
+ awaitCond(
+ breaker().currentFailureCount == 4,
+ awaitTimeout,
+ message = s"Current failure count: ${breaker().currentFailureCount}")
val harmlessException = new TestException
val harmlessExceptionAsSuccess: Try[String] => Boolean = {
diff --git a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala
index 3c5f0d2b16..857d4174a2 100644
--- a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala
@@ -27,13 +27,14 @@ class RetrySpec extends AkkaSpec with RetrySupport {
"run a successful Future only once" in {
@volatile var counter = 0
- val retried = retry(() =>
- Future.successful({
- counter += 1
- counter
- }),
- 5,
- 1 second)
+ val retried = retry(
+ () =>
+ Future.successful({
+ counter += 1
+ counter
+ }),
+ 5,
+ 1 second)
within(3 seconds) {
Await.result(retried, remaining) should ===(1)
diff --git a/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala
index 72bb37d7e8..f6bfe47c90 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala
@@ -96,8 +96,9 @@ class BalancingSpec extends AkkaSpec("""
"deliver messages in a balancing fashion when defined programatically" in {
val latch = TestLatch(poolSize)
- val pool = system.actorOf(BalancingPool(poolSize).props(routeeProps = Props(classOf[Worker], latch)),
- name = "balancingPool-1")
+ val pool = system.actorOf(
+ BalancingPool(poolSize).props(routeeProps = Props(classOf[Worker], latch)),
+ name = "balancingPool-1")
test(pool, latch)
}
diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala
index 6164b0d439..0c70f6b841 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala
@@ -84,8 +84,9 @@ class ConsistentHashingRouterSpec
case Msg2(key, _) => key
}
val router2 =
- system.actorOf(ConsistentHashingPool(nrOfInstances = 1, hashMapping = hashMapping).props(Props[Echo]),
- "router2")
+ system.actorOf(
+ ConsistentHashingPool(nrOfInstances = 1, hashMapping = hashMapping).props(Props[Echo]),
+ "router2")
router2 ! Msg2("a", "A")
val destinationA = expectMsgType[ActorRef]
diff --git a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala
index e798a9670b..bc696c6fb9 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala
@@ -45,9 +45,10 @@ object MetricsBasedResizerSpec {
var msgs: Set[TestLatch] = Set()
- def mockSend(await: Boolean,
- l: TestLatch = TestLatch(),
- routeeIdx: Int = Random.nextInt(routees.length)): Latches = {
+ def mockSend(
+ await: Boolean,
+ l: TestLatch = TestLatch(),
+ routeeIdx: Int = Random.nextInt(routees.length)): Latches = {
val target = routees(routeeIdx)
val first = TestLatch()
val latches = Latches(first, l)
@@ -334,8 +335,9 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT
}
"ignore further away sample data when optmizing" in {
- val resizer = DefaultOptimalSizeExploringResizer(explorationProbability = 0,
- numOfAdjacentSizesToConsiderDuringOptimization = 4)
+ val resizer = DefaultOptimalSizeExploringResizer(
+ explorationProbability = 0,
+ numOfAdjacentSizesToConsiderDuringOptimization = 4)
resizer.performanceLog =
Map(7 -> 5.millis, 8 -> 2.millis, 10 -> 3.millis, 11 -> 4.millis, 12 -> 3.millis, 13 -> 1.millis)
diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala
index 6d4c99481b..1d81e285a3 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala
@@ -160,13 +160,14 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with
// make sure the pool starts at the expected lower limit and grows to the upper as needed
// as influenced by the backlog of blocking pooled actors
- val resizer = DefaultResizer(lowerBound = 3,
- upperBound = 5,
- rampupRate = 0.1,
- backoffRate = 0.0,
- pressureThreshold = 1,
- messagesPerResize = 1,
- backoffThreshold = 0.0)
+ val resizer = DefaultResizer(
+ lowerBound = 3,
+ upperBound = 5,
+ rampupRate = 0.1,
+ backoffRate = 0.0,
+ pressureThreshold = 1,
+ messagesPerResize = 1,
+ backoffThreshold = 0.0)
val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props(new Actor {
def receive = {
@@ -203,13 +204,14 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with
}
"backoff" in within(10 seconds) {
- val resizer = DefaultResizer(lowerBound = 2,
- upperBound = 5,
- rampupRate = 1.0,
- backoffRate = 1.0,
- backoffThreshold = 0.40,
- pressureThreshold = 1,
- messagesPerResize = 2)
+ val resizer = DefaultResizer(
+ lowerBound = 2,
+ upperBound = 5,
+ rampupRate = 1.0,
+ backoffRate = 1.0,
+ backoffThreshold = 0.40,
+ pressureThreshold = 1,
+ messagesPerResize = 2)
val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props(new Actor {
def receive = {
diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala
index e093866518..6b34eba014 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala
@@ -124,8 +124,9 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
}
}
val router =
- system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor]),
- "router3")
+ system.actorOf(
+ RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor]),
+ "router3")
Await.ready(latch, remainingOrDefault)
router ! GetRoutees
expectMsgType[Routees].routees.size should ===(3)
@@ -232,10 +233,11 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
}
"allow external configuration" in {
- val sys = ActorSystem("FromConfig",
- ConfigFactory
- .parseString("akka.actor.deployment./routed.router=round-robin-pool")
- .withFallback(system.settings.config))
+ val sys = ActorSystem(
+ "FromConfig",
+ ConfigFactory
+ .parseString("akka.actor.deployment./routed.router=round-robin-pool")
+ .withFallback(system.settings.config))
try {
sys.actorOf(FromConfig.props(routeeProps = Props[TestActor]), "routed")
} finally {
diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala
index d12d0f83fa..637e5751aa 100644
--- a/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala
@@ -52,10 +52,9 @@ object SerializationSetupSpec {
val serializationSettings = SerializationSetup { _ =>
List(SerializerDetails("test", programmaticDummySerializer, List(classOf[ProgrammaticDummy])))
}
- val bootstrapSettings = BootstrapSetup(None,
- Some(
- ConfigFactory.parseString(
- """
+ val bootstrapSettings = BootstrapSetup(
+ None,
+ Some(ConfigFactory.parseString("""
akka {
actor {
serialize-messages = off
@@ -69,12 +68,12 @@ object SerializationSetupSpec {
}
}
""")),
- None)
+ None)
val actorSystemSettings = ActorSystemSetup(bootstrapSettings, serializationSettings)
- val noJavaSerializationSystem = ActorSystem("SerializationSettingsSpec" + "NoJavaSerialization",
- ConfigFactory.parseString(
- """
+ val noJavaSerializationSystem = ActorSystem(
+ "SerializationSettingsSpec" + "NoJavaSerialization",
+ ConfigFactory.parseString("""
akka {
actor {
allow-java-serialization = off
@@ -125,15 +124,16 @@ class SerializationSetupSpec
// allow-java-serialization=on to create the SerializationSetup and use that SerializationSetup
// in another system with allow-java-serialization=off
val addedJavaSerializationSettings = SerializationSetup { _ =>
- List(SerializerDetails("test", programmaticDummySerializer, List(classOf[ProgrammaticDummy])),
- SerializerDetails("java-manual",
- new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]),
- List(classOf[ProgrammaticJavaDummy])))
+ List(
+ SerializerDetails("test", programmaticDummySerializer, List(classOf[ProgrammaticDummy])),
+ SerializerDetails(
+ "java-manual",
+ new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]),
+ List(classOf[ProgrammaticJavaDummy])))
}
- val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(None,
- Some(
- ConfigFactory.parseString(
- """
+ val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(
+ None,
+ Some(ConfigFactory.parseString("""
akka {
loglevel = debug
actor {
@@ -143,7 +143,7 @@ class SerializationSetupSpec
}
}
""")),
- None)
+ None)
val addedJavaSerializationViaSettingsSystem =
ActorSystem(
diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala
index fa34eb56e3..9c81a91ff2 100644
--- a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala
@@ -132,16 +132,17 @@ object SerializationTests {
}
"""
- val systemMessageClasses = List[Class[_]](classOf[Create],
- classOf[Recreate],
- classOf[Suspend],
- classOf[Resume],
- classOf[Terminate],
- classOf[Supervise],
- classOf[Watch],
- classOf[Unwatch],
- classOf[Failed],
- NoMessage.getClass)
+ val systemMessageClasses = List[Class[_]](
+ classOf[Create],
+ classOf[Recreate],
+ classOf[Suspend],
+ classOf[Resume],
+ classOf[Terminate],
+ classOf[Supervise],
+ classOf[Watch],
+ classOf[Unwatch],
+ classOf[Failed],
+ NoMessage.getClass)
}
class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) {
@@ -387,64 +388,74 @@ class SerializationCompatibilitySpec extends AkkaSpec(SerializationTests.mostlyR
"be preserved for the Create SystemMessage" in {
// Using null as the cause to avoid a large serialized message and JDK differences
- verify(Create(Some(null)),
- "aced00057372001b616b6b612e64697370617463682e7379736d73672e4372656174650000000000" +
- "0000010200014c00076661696c75726574000e4c7363616c612f4f7074696f6e3b78707372000a73" +
- "63616c612e536f6d651122f2695ea18b740200014c0001787400124c6a6176612f6c616e672f4f62" +
- "6a6563743b7872000c7363616c612e4f7074696f6efe6937fddb0e6674020000787070")
+ verify(
+ Create(Some(null)),
+ "aced00057372001b616b6b612e64697370617463682e7379736d73672e4372656174650000000000" +
+ "0000010200014c00076661696c75726574000e4c7363616c612f4f7074696f6e3b78707372000a73" +
+ "63616c612e536f6d651122f2695ea18b740200014c0001787400124c6a6176612f6c616e672f4f62" +
+ "6a6563743b7872000c7363616c612e4f7074696f6efe6937fddb0e6674020000787070")
}
"be preserved for the Recreate SystemMessage" in {
- verify(Recreate(null),
- "aced00057372001d616b6b612e64697370617463682e7379736d73672e5265637265617465000000" +
- "00000000010200014c000563617573657400154c6a6176612f6c616e672f5468726f7761626c653b" +
- "787070")
+ verify(
+ Recreate(null),
+ "aced00057372001d616b6b612e64697370617463682e7379736d73672e5265637265617465000000" +
+ "00000000010200014c000563617573657400154c6a6176612f6c616e672f5468726f7761626c653b" +
+ "787070")
}
"be preserved for the Suspend SystemMessage" in {
- verify(Suspend(),
- "aced00057372001c616b6b612e64697370617463682e7379736d73672e53757370656e6400000000" +
- "000000010200007870")
+ verify(
+ Suspend(),
+ "aced00057372001c616b6b612e64697370617463682e7379736d73672e53757370656e6400000000" +
+ "000000010200007870")
}
"be preserved for the Resume SystemMessage" in {
- verify(Resume(null),
- "aced00057372001b616b6b612e64697370617463682e7379736d73672e526573756d650000000000" +
- "0000010200014c000f63617573656442794661696c7572657400154c6a6176612f6c616e672f5468" +
- "726f7761626c653b787070")
+ verify(
+ Resume(null),
+ "aced00057372001b616b6b612e64697370617463682e7379736d73672e526573756d650000000000" +
+ "0000010200014c000f63617573656442794661696c7572657400154c6a6176612f6c616e672f5468" +
+ "726f7761626c653b787070")
}
"be preserved for the Terminate SystemMessage" in {
- verify(Terminate(),
- "aced00057372001e616b6b612e64697370617463682e7379736d73672e5465726d696e6174650000" +
- "0000000000010200007870")
+ verify(
+ Terminate(),
+ "aced00057372001e616b6b612e64697370617463682e7379736d73672e5465726d696e6174650000" +
+ "0000000000010200007870")
}
"be preserved for the Supervise SystemMessage" in {
- verify(Supervise(null, true),
- "aced00057372001e616b6b612e64697370617463682e7379736d73672e5375706572766973650000" +
- "0000000000010200025a00056173796e634c00056368696c647400154c616b6b612f6163746f722f" +
- "4163746f725265663b78700170")
+ verify(
+ Supervise(null, true),
+ "aced00057372001e616b6b612e64697370617463682e7379736d73672e5375706572766973650000" +
+ "0000000000010200025a00056173796e634c00056368696c647400154c616b6b612f6163746f722f" +
+ "4163746f725265663b78700170")
}
"be preserved for the Watch SystemMessage" in {
- verify(Watch(null, null),
- "aced00057372001a616b6b612e64697370617463682e7379736d73672e5761746368000000000000" +
- "00010200024c00077761746368656574001d4c616b6b612f6163746f722f496e7465726e616c4163" +
- "746f725265663b4c00077761746368657271007e000178707070")
+ verify(
+ Watch(null, null),
+ "aced00057372001a616b6b612e64697370617463682e7379736d73672e5761746368000000000000" +
+ "00010200024c00077761746368656574001d4c616b6b612f6163746f722f496e7465726e616c4163" +
+ "746f725265663b4c00077761746368657271007e000178707070")
}
"be preserved for the Unwatch SystemMessage" in {
- verify(Unwatch(null, null),
- "aced00057372001c616b6b612e64697370617463682e7379736d73672e556e776174636800000000" +
- "000000010200024c0007776174636865657400154c616b6b612f6163746f722f4163746f72526566" +
- "3b4c00077761746368657271007e000178707070")
+ verify(
+ Unwatch(null, null),
+ "aced00057372001c616b6b612e64697370617463682e7379736d73672e556e776174636800000000" +
+ "000000010200024c0007776174636865657400154c616b6b612f6163746f722f4163746f72526566" +
+ "3b4c00077761746368657271007e000178707070")
}
"be preserved for the NoMessage SystemMessage" in {
- verify(NoMessage,
- "aced00057372001f616b6b612e64697370617463682e7379736d73672e4e6f4d6573736167652400" +
- "000000000000010200007870")
+ verify(
+ NoMessage,
+ "aced00057372001f616b6b612e64697370617463682e7379736d73672e4e6f4d6573736167652400" +
+ "000000000000010200007870")
}
"be preserved for the Failed SystemMessage" in {
// Using null as the cause to avoid a large serialized message and JDK differences
- verify(Failed(null, cause = null, uid = 0),
- "aced00057372001b616b6b612e64697370617463682e7379736d73672e4661696c65640000000000" +
- "0000010200034900037569644c000563617573657400154c6a6176612f6c616e672f5468726f7761" +
- "626c653b4c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b78700000" +
- "00007070")
+ verify(
+ Failed(null, cause = null, uid = 0),
+ "aced00057372001b616b6b612e64697370617463682e7379736d73672e4661696c65640000000000" +
+ "0000010200034900037569644c000563617573657400154c6a6176612f6c616e672f5468726f7761" +
+ "626c653b4c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b78700000" +
+ "00007070")
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala
index 304b51d99e..c451e2b2b5 100644
--- a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala
@@ -624,9 +624,10 @@ trait CustomContainsMatcher {
}
def matchResult(success: Boolean): MatchResult =
- MatchResult(success,
- s"""$left did not contain all of $right in sequence""",
- s"""$left contains all of $right in sequence""")
+ MatchResult(
+ success,
+ s"""$left did not contain all of $right in sequence""",
+ s"""$left contains all of $right in sequence""")
attemptMatch(left.toList, right)
}
@@ -704,12 +705,13 @@ trait QueueSetupHelper {
import akka.util.QueueTestEvents._
- case class TestContext(queue: BoundedBlockingQueue[String],
- events: mutable.Buffer[QueueEvent],
- notEmpty: TestCondition,
- notFull: TestCondition,
- lock: ReentrantLock,
- backingQueue: util.Queue[String])
+ case class TestContext(
+ queue: BoundedBlockingQueue[String],
+ events: mutable.Buffer[QueueEvent],
+ notEmpty: TestCondition,
+ notFull: TestCondition,
+ lock: ReentrantLock,
+ backingQueue: util.Queue[String])
/**
* Backing queue that records all poll and offer calls in `events`
@@ -735,10 +737,11 @@ trait QueueSetupHelper {
/**
* Reentrant lock condition that records when the condition is signaled or `await`ed.
*/
- class TestCondition(events: mutable.Buffer[QueueEvent],
- condition: Condition,
- signalEvent: QueueEvent,
- awaitEvent: QueueEvent)
+ class TestCondition(
+ events: mutable.Buffer[QueueEvent],
+ condition: Condition,
+ signalEvent: QueueEvent,
+ awaitEvent: QueueEvent)
extends Condition {
case class Manual(waitTime: Long = 0, waitingThread: Option[Thread] = None)
diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
index 20f7d1f5f2..817c8c5411 100644
--- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
@@ -164,8 +164,9 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers {
(!strict || (bsIterator.toSeq == vecIterator.toSeq))
}
- def likeVecIts(a: ByteString, b: ByteString)(body: (BufferedIterator[Byte], BufferedIterator[Byte]) => Any,
- strict: Boolean = true): Boolean = {
+ def likeVecIts(a: ByteString, b: ByteString)(
+ body: (BufferedIterator[Byte], BufferedIterator[Byte]) => Any,
+ strict: Boolean = true): Boolean = {
val (bsAIt, bsBIt) = (a.iterator, b.iterator)
val (vecAIt, vecBIt) = (Vector(a: _*).iterator.buffered, Vector(b: _*).iterator.buffered)
(body(bsAIt, bsBIt) == body(vecAIt, vecBIt)) &&
@@ -689,17 +690,19 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers {
(a ++ b ++ c) should ===(xs)
}
"recombining - edge cases" in {
- excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](1)), ByteString1(Array[Byte](2)))),
- -2147483648,
- 112121212)
+ excerciseRecombining(
+ ByteStrings(Vector(ByteString1(Array[Byte](1)), ByteString1(Array[Byte](2)))),
+ -2147483648,
+ 112121212)
excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](100)))), 0, 2)
excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](100)))), -2147483648, 2)
excerciseRecombining(ByteStrings(Vector(ByteString1.fromString("ab"), ByteString1.fromString("cd"))), 0, 1)
excerciseRecombining(ByteString1.fromString("abc").drop(1).take(1), -324234, 234232)
excerciseRecombining(ByteString("a"), 0, 2147483647)
- excerciseRecombining(ByteStrings(Vector(ByteString1.fromString("ab"), ByteString1.fromString("cd"))).drop(2),
- 2147483647,
- 1)
+ excerciseRecombining(
+ ByteStrings(Vector(ByteString1.fromString("ab"), ByteString1.fromString("cd"))).drop(2),
+ 2147483647,
+ 1)
excerciseRecombining(ByteString1.fromString("ab").drop1(1), Int.MaxValue, Int.MaxValue)
}
}
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala
index 8a321ec413..a4d296744e 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala
@@ -517,16 +517,18 @@ class InterceptScalaBehaviorSpec extends ImmutableWithSignalScalaBehaviorSpec wi
override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = {
val inbox = TestInbox[Either[Signal, Command]]("tapListener")
val tap = new BehaviorInterceptor[Command, Command] {
- override def aroundReceive(context: TypedActorContext[Command],
- message: Command,
- target: ReceiveTarget[Command]): Behavior[Command] = {
+ override def aroundReceive(
+ context: TypedActorContext[Command],
+ message: Command,
+ target: ReceiveTarget[Command]): Behavior[Command] = {
inbox.ref ! Right(message)
target(context, message)
}
- override def aroundSignal(context: TypedActorContext[Command],
- signal: Signal,
- target: SignalTarget[Command]): Behavior[Command] = {
+ override def aroundSignal(
+ context: TypedActorContext[Command],
+ signal: Signal,
+ target: SignalTarget[Command]): Behavior[Command] = {
inbox.ref ! Left(signal)
target(context, signal)
}
@@ -567,7 +569,7 @@ class ImmutableWithSignalJavaBehaviorSpec extends Messages with BecomeWithLifecy
SBehaviors.same
case Stop => SBehaviors.stopped
case _: AuxPing => SBehaviors.unhandled
- }),
+ }),
fs((_, sig) => {
monitor ! ReceivedSignal(sig)
SBehaviors.same
@@ -600,7 +602,7 @@ class ImmutableJavaBehaviorSpec extends Messages with Become with Stoppable {
SBehaviors.same
case Stop => SBehaviors.stopped
case _: AuxPing => SBehaviors.unhandled
- })
+ })
}
}
@@ -635,16 +637,18 @@ class TapJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec with Reuse
override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = {
val inbox = TestInbox[Either[Signal, Command]]("tapListener")
val tap = new BehaviorInterceptor[Command, Command] {
- override def aroundReceive(context: TypedActorContext[Command],
- message: Command,
- target: ReceiveTarget[Command]): Behavior[Command] = {
+ override def aroundReceive(
+ context: TypedActorContext[Command],
+ message: Command,
+ target: ReceiveTarget[Command]): Behavior[Command] = {
inbox.ref ! Right(message)
target(context, message)
}
- override def aroundSignal(context: TypedActorContext[Command],
- signal: Signal,
- target: SignalTarget[Command]): Behavior[Command] = {
+ override def aroundSignal(
+ context: TypedActorContext[Command],
+ signal: Signal,
+ target: SignalTarget[Command]): Behavior[Command] = {
inbox.ref ! Left(signal)
target(context, signal)
}
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala
index f18879d3c4..f3dbbe2d81 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala
@@ -95,9 +95,11 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike {
}
"load extensions from the configuration" in
- withEmptyActorSystem("ExtensionsSpec03",
- Some(ConfigFactory.parseString(
- """
+ withEmptyActorSystem(
+ "ExtensionsSpec03",
+ Some(
+ ConfigFactory.parseString(
+ """
akka.actor.typed.extensions = ["akka.actor.typed.DummyExtension1$", "akka.actor.typed.SlowExtension$"]
"""))) { sys =>
sys.hasExtension(DummyExtension1) should ===(true)
@@ -109,10 +111,10 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike {
"handle extensions that fail to initialize" in {
def create(): Unit = {
- ActorSystem[Any](Behavior.EmptyBehavior,
- "ExtensionsSpec04",
- ConfigFactory.parseString(
- """
+ ActorSystem[Any](
+ Behavior.EmptyBehavior,
+ "ExtensionsSpec04",
+ ConfigFactory.parseString("""
akka.actor.typed.extensions = ["akka.actor.typed.FailingToLoadExtension$"]
"""))
}
@@ -157,9 +159,10 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike {
"fail the system if a library-extension is missing" in
intercept[RuntimeException] {
- withEmptyActorSystem("ExtensionsSpec08",
- Some(ConfigFactory.parseString(
- """akka.actor.typed.library-extensions += "akka.actor.typed.MissingExtension""""))) { _ =>
+ withEmptyActorSystem(
+ "ExtensionsSpec08",
+ Some(ConfigFactory.parseString(
+ """akka.actor.typed.library-extensions += "akka.actor.typed.MissingExtension""""))) { _ =>
()
}
}
@@ -206,12 +209,14 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike {
}
"override extensions via ActorSystemSetup" in
- withEmptyActorSystem("ExtensionsSpec10",
- Some(ConfigFactory.parseString(
- """
+ withEmptyActorSystem(
+ "ExtensionsSpec10",
+ Some(
+ ConfigFactory.parseString(
+ """
akka.actor.typed.extensions = ["akka.actor.typed.DummyExtension1$", "akka.actor.typed.SlowExtension$"]
""")),
- Some(ActorSystemSetup(new DummyExtension1Setup(sys => new DummyExtension1ViaSetup)))) { sys =>
+ Some(ActorSystemSetup(new DummyExtension1Setup(sys => new DummyExtension1ViaSetup)))) { sys =>
sys.hasExtension(DummyExtension1) should ===(true)
sys.extension(DummyExtension1) shouldBe a[DummyExtension1ViaSetup]
DummyExtension1(sys) shouldBe a[DummyExtension1ViaSetup]
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala
index 802d741bb1..f675386381 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala
@@ -31,18 +31,20 @@ class InterceptSpec extends ScalaTestWithActorTestKit("""
implicit val untypedSystem = system.toUntyped
private def snitchingInterceptor(probe: ActorRef[String]) = new BehaviorInterceptor[String, String] {
- override def aroundReceive(context: TypedActorContext[String],
- message: String,
- target: ReceiveTarget[String]): Behavior[String] = {
+ override def aroundReceive(
+ context: TypedActorContext[String],
+ message: String,
+ target: ReceiveTarget[String]): Behavior[String] = {
probe ! ("before " + message)
val b = target(context, message)
probe ! ("after " + message)
b
}
- override def aroundSignal(context: TypedActorContext[String],
- signal: Signal,
- target: SignalTarget[String]): Behavior[String] = {
+ override def aroundSignal(
+ context: TypedActorContext[String],
+ signal: Signal,
+ target: SignalTarget[String]): Behavior[String] = {
target(context, signal)
}
@@ -154,19 +156,22 @@ class InterceptSpec extends ScalaTestWithActorTestKit("""
"allow an interceptor to replace started behavior" in {
val interceptor = new BehaviorInterceptor[String, String] {
- override def aroundStart(context: TypedActorContext[String],
- target: PreStartTarget[String]): Behavior[String] = {
+ override def aroundStart(
+ context: TypedActorContext[String],
+ target: PreStartTarget[String]): Behavior[String] = {
Behaviors.stopped
}
- def aroundReceive(context: TypedActorContext[String],
- message: String,
- target: ReceiveTarget[String]): Behavior[String] =
+ def aroundReceive(
+ context: TypedActorContext[String],
+ message: String,
+ target: ReceiveTarget[String]): Behavior[String] =
target(context, message)
- def aroundSignal(context: TypedActorContext[String],
- signal: Signal,
- target: SignalTarget[String]): Behavior[String] =
+ def aroundSignal(
+ context: TypedActorContext[String],
+ signal: Signal,
+ target: SignalTarget[String]): Behavior[String] =
target(context, signal)
}
@@ -260,18 +265,20 @@ class InterceptSpec extends ScalaTestWithActorTestKit("""
}
val poisonInterceptor = new BehaviorInterceptor[Any, Msg] {
- override def aroundReceive(context: TypedActorContext[Any],
- message: Any,
- target: ReceiveTarget[Msg]): Behavior[Msg] =
+ override def aroundReceive(
+ context: TypedActorContext[Any],
+ message: Any,
+ target: ReceiveTarget[Msg]): Behavior[Msg] =
message match {
case MyPoisonPill => Behaviors.stopped
case m: Msg => target(context, m)
case _ => Behaviors.unhandled
}
- override def aroundSignal(context: TypedActorContext[Any],
- signal: Signal,
- target: SignalTarget[Msg]): Behavior[Msg] =
+ override def aroundSignal(
+ context: TypedActorContext[Any],
+ signal: Signal,
+ target: SignalTarget[Msg]): Behavior[Msg] =
target.apply(context, signal)
}
@@ -302,16 +309,18 @@ class InterceptSpec extends ScalaTestWithActorTestKit("""
override def interceptMessageType = classOf[B]
- override def aroundReceive(ctx: TypedActorContext[Message],
- msg: Message,
- target: ReceiveTarget[Message]): Behavior[Message] = {
+ override def aroundReceive(
+ ctx: TypedActorContext[Message],
+ msg: Message,
+ target: ReceiveTarget[Message]): Behavior[Message] = {
interceptProbe.ref ! msg
target(ctx, msg)
}
- override def aroundSignal(ctx: TypedActorContext[Message],
- signal: Signal,
- target: SignalTarget[Message]): Behavior[Message] =
+ override def aroundSignal(
+ ctx: TypedActorContext[Message],
+ signal: Signal,
+ target: SignalTarget[Message]): Behavior[Message] =
target(ctx, signal)
}
@@ -332,14 +341,16 @@ class InterceptSpec extends ScalaTestWithActorTestKit("""
"intercept PostStop" in {
val probe = TestProbe[String]()
val postStopInterceptor = new BehaviorInterceptor[String, String] {
- def aroundReceive(ctx: TypedActorContext[String],
- msg: String,
- target: ReceiveTarget[String]): Behavior[String] = {
+ def aroundReceive(
+ ctx: TypedActorContext[String],
+ msg: String,
+ target: ReceiveTarget[String]): Behavior[String] = {
target(ctx, msg)
}
- def aroundSignal(ctx: TypedActorContext[String],
- signal: Signal,
- target: SignalTarget[String]): Behavior[String] = {
+ def aroundSignal(
+ ctx: TypedActorContext[String],
+ signal: Signal,
+ target: SignalTarget[String]): Behavior[String] = {
signal match {
case PostStop =>
probe.ref ! "interceptor-post-stop"
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala
index 26c5266f88..d7a943e75e 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala
@@ -42,9 +42,10 @@ object SupervisionSpec {
class Exc2 extends Exc1("exc-2")
class Exc3(message: String = "exc-3") extends RuntimeException(message) with NoStackTrace
- def targetBehavior(monitor: ActorRef[Event],
- state: State = State(0, Map.empty),
- slowStop: Option[CountDownLatch] = None): Behavior[Command] =
+ def targetBehavior(
+ monitor: ActorRef[Event],
+ state: State = State(0, Map.empty),
+ slowStop: Option[CountDownLatch] = None): Behavior[Command] =
receive[Command] { (context, cmd) =>
cmd match {
case Ping(n) =>
@@ -1066,14 +1067,16 @@ class SupervisionSpec extends ScalaTestWithActorTestKit("""
// irrelevant for test case but needed to use intercept in the pyramid of doom below
val whateverInterceptor = new BehaviorInterceptor[String, String] {
// identity intercept
- override def aroundReceive(context: TypedActorContext[String],
- message: String,
- target: ReceiveTarget[String]): Behavior[String] =
+ override def aroundReceive(
+ context: TypedActorContext[String],
+ message: String,
+ target: ReceiveTarget[String]): Behavior[String] =
target(context, message)
- override def aroundSignal(context: TypedActorContext[String],
- signal: Signal,
- target: SignalTarget[String]): Behavior[String] =
+ override def aroundSignal(
+ context: TypedActorContext[String],
+ signal: Signal,
+ target: SignalTarget[String]): Behavior[String] =
target(context, signal)
}
@@ -1197,11 +1200,12 @@ class SupervisionSpec extends ScalaTestWithActorTestKit("""
}
- val allStrategies = Seq(SupervisorStrategy.stop,
- SupervisorStrategy.restart,
- SupervisorStrategy.resume,
- SupervisorStrategy.restartWithBackoff(1.millis, 100.millis, 2d),
- SupervisorStrategy.restart.withLimit(1, 100.millis))
+ val allStrategies = Seq(
+ SupervisorStrategy.stop,
+ SupervisorStrategy.restart,
+ SupervisorStrategy.resume,
+ SupervisorStrategy.restartWithBackoff(1.millis, 100.millis, 2d),
+ SupervisorStrategy.restart.withLimit(1, 100.millis))
allStrategies.foreach { strategy =>
s"Supervision with the strategy $strategy" should {
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala
index 0ab9ddf73a..bebc17919c 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala
@@ -118,12 +118,13 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe
val probe = TestProbe[Any]()
val ex = new TestException("boom")
val behavior = Behaviors.setup[Any] { context =>
- val child = context.spawn(Behaviors
- .supervise(Behaviors.receive[Any]((_, _) => {
- throw ex
- }))
- .onFailure[Throwable](SupervisorStrategy.stop),
- "child")
+ val child = context.spawn(
+ Behaviors
+ .supervise(Behaviors.receive[Any]((_, _) => {
+ throw ex
+ }))
+ .onFailure[Throwable](SupervisorStrategy.stop),
+ "child")
context.watch(child)
Behaviors
@@ -153,8 +154,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe
val probe = TestProbe[Any]()
val ex = new TestException("boom")
val grossoBosso =
- spawn(Behaviors.setup[Any] {
- context =>
+ spawn(
+ Behaviors.setup[Any] { context =>
val middleManagement = context.spawn(Behaviors.setup[Any] { context =>
val sixPackJoe = context.spawn(Behaviors.receive[Any]((context, message) => throw ex), "joe")
context.watch(sixPackJoe)
@@ -178,7 +179,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe
Behaviors.stopped
}
- }, "grosso-bosso")
+ },
+ "grosso-bosso")
EventFilter[TestException](occurrences = 1).intercept {
EventFilter[DeathPactException](occurrences = 1).intercept {
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala
index ea78401c0c..92816cd982 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala
@@ -39,9 +39,10 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll with
"An ActorSystem" must {
"start the guardian actor and terminate when it terminates" in {
- val t = withSystem("a",
- Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.message; Behaviors.stopped },
- doTerminate = false) { sys =>
+ val t = withSystem(
+ "a",
+ Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.message; Behaviors.stopped },
+ doTerminate = false) { sys =>
val inbox = TestInbox[String]("a")
sys ! Probe("hello", inbox.ref)
eventually {
@@ -68,16 +69,17 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll with
"terminate the guardian actor" in {
val inbox = TestInbox[String]("terminate")
- val sys = system(Behaviors
- .receive[Probe] {
- case (_, _) => Behaviors.unhandled
- }
- .receiveSignal {
- case (_, PostStop) =>
- inbox.ref ! "done"
- Behaviors.same
- },
- "terminate")
+ val sys = system(
+ Behaviors
+ .receive[Probe] {
+ case (_, _) => Behaviors.unhandled
+ }
+ .receiveSignal {
+ case (_, PostStop) =>
+ inbox.ref ! "done"
+ Behaviors.same
+ },
+ "terminate")
sys.terminate().futureValue
inbox.receiveAll() should ===("done" :: Nil)
}
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala
index ded025773f..0b94c0bb8a 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala
@@ -302,12 +302,13 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit("""
"Logging with MDC for a typed actor" must {
"provide the MDC values in the log" in {
- val behaviors = Behaviors.withMdc[Protocol](Map("static" -> 1),
- // FIXME why u no infer the type here Scala??
- (message: Protocol) =>
- if (message.transactionId == 1)
- Map("txId" -> message.transactionId, "first" -> true)
- else Map("txId" -> message.transactionId)) {
+ val behaviors = Behaviors.withMdc[Protocol](
+ Map("static" -> 1),
+ // FIXME why u no infer the type here Scala??
+ (message: Protocol) =>
+ if (message.transactionId == 1)
+ Map("txId" -> message.transactionId, "first" -> true)
+ else Map("txId" -> message.transactionId)) {
Behaviors.setup { context =>
context.log.info("Starting")
Behaviors.receiveMessage { _ =>
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala
index 6b791b1ba0..4d0d1ebff1 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala
@@ -67,15 +67,17 @@ class StopSpec extends ScalaTestWithActorTestKit with WordSpecLike {
val probe = TestProbe[Done]()
spawn(Behaviors.setup[AnyRef] { _ =>
Behaviors.intercept(new BehaviorInterceptor[AnyRef, AnyRef] {
- override def aroundReceive(context: typed.TypedActorContext[AnyRef],
- message: AnyRef,
- target: ReceiveTarget[AnyRef]): Behavior[AnyRef] = {
+ override def aroundReceive(
+ context: typed.TypedActorContext[AnyRef],
+ message: AnyRef,
+ target: ReceiveTarget[AnyRef]): Behavior[AnyRef] = {
target(context, message)
}
- override def aroundSignal(context: typed.TypedActorContext[AnyRef],
- signal: Signal,
- target: SignalTarget[AnyRef]): Behavior[AnyRef] = {
+ override def aroundSignal(
+ context: typed.TypedActorContext[AnyRef],
+ signal: Signal,
+ target: SignalTarget[AnyRef]): Behavior[AnyRef] = {
target(context, signal)
}
})(Behaviors.stopped { () =>
diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala
index eda729910a..9d992fa383 100644
--- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala
@@ -178,8 +178,9 @@ class AdapterSpec extends AkkaSpec("""
for { _ <- 0 to 10 } {
var system: akka.actor.typed.ActorSystem[NotUsed] = null
try {
- system = ActorSystem.create(Behaviors.setup[NotUsed](_ => Behavior.stopped[NotUsed]),
- "AdapterSpec-stopping-guardian")
+ system = ActorSystem.create(
+ Behaviors.setup[NotUsed](_ => Behavior.stopped[NotUsed]),
+ "AdapterSpec-stopping-guardian")
} finally if (system != null) shutdown(system.toUntyped)
}
}
diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala
index 704e7ffe74..771b3d2661 100644
--- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala
@@ -161,21 +161,23 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with WordSpecLik
Behaviors.withTimers(timers => idle(timers, target, after, maxSize))
}
- def idle(timers: TimerScheduler[Msg],
- target: ActorRef[Batch],
- after: FiniteDuration,
- maxSize: Int): Behavior[Msg] = {
+ def idle(
+ timers: TimerScheduler[Msg],
+ target: ActorRef[Batch],
+ after: FiniteDuration,
+ maxSize: Int): Behavior[Msg] = {
Behaviors.receiveMessage[Msg] { message =>
timers.startSingleTimer(TimerKey, Timeout, after)
active(Vector(message), timers, target, after, maxSize)
}
}
- def active(buffer: Vector[Msg],
- timers: TimerScheduler[Msg],
- target: ActorRef[Batch],
- after: FiniteDuration,
- maxSize: Int): Behavior[Msg] = {
+ def active(
+ buffer: Vector[Msg],
+ timers: TimerScheduler[Msg],
+ target: ActorRef[Batch],
+ after: FiniteDuration,
+ maxSize: Int): Behavior[Msg] = {
Behaviors.receiveMessage[Msg] {
case Timeout =>
target ! Batch(buffer)
@@ -298,10 +300,11 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with WordSpecLik
}
// per session actor behavior
- def prepareToLeaveHome(whoIsLeaving: String,
- respondTo: ActorRef[ReadyToLeaveHome],
- keyCabinet: ActorRef[GetKeys],
- drawer: ActorRef[GetWallet]): Behavior[NotUsed] =
+ def prepareToLeaveHome(
+ whoIsLeaving: String,
+ respondTo: ActorRef[ReadyToLeaveHome],
+ keyCabinet: ActorRef[GetKeys],
+ drawer: ActorRef[GetWallet]): Behavior[NotUsed] =
// we don't _really_ care about the actor protocol here as nobody will send us
// messages except for responses to our queries, so we just accept any kind of message
// but narrow that to more limited types then we interact
diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala
index 8e39948798..9d8363d5f0 100644
--- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala
@@ -133,8 +133,9 @@ object IntroSpec {
message match {
case GetSession(screenName, client) =>
// create a child actor for further interaction with the client
- val ses = context.spawn(session(context.self, screenName, client),
- name = URLEncoder.encode(screenName, StandardCharsets.UTF_8.name))
+ val ses = context.spawn(
+ session(context.self, screenName, client),
+ name = URLEncoder.encode(screenName, StandardCharsets.UTF_8.name))
client ! SessionGranted(ses)
chatRoom(ses :: sessions)
case PublishSessionMessage(screenName, message) =>
@@ -144,9 +145,10 @@ object IntroSpec {
}
}
- private def session(room: ActorRef[PublishSessionMessage],
- screenName: String,
- client: ActorRef[SessionEvent]): Behavior[SessionCommand] =
+ private def session(
+ room: ActorRef[PublishSessionMessage],
+ screenName: String,
+ client: ActorRef[SessionEvent]): Behavior[SessionCommand] =
Behaviors.receive { (context, message) =>
message match {
case PostMessage(message) =>
diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala
index 21c16c9d14..30f8362deb 100644
--- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala
+++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala
@@ -49,8 +49,9 @@ object OOIntroSpec {
message match {
case GetSession(screenName, client) =>
// create a child actor for further interaction with the client
- val ses = context.spawn(session(context.self, screenName, client),
- name = URLEncoder.encode(screenName, StandardCharsets.UTF_8.name))
+ val ses = context.spawn(
+ session(context.self, screenName, client),
+ name = URLEncoder.encode(screenName, StandardCharsets.UTF_8.name))
client ! SessionGranted(ses)
sessions = ses :: sessions
this
@@ -62,9 +63,10 @@ object OOIntroSpec {
}
}
- private def session(room: ActorRef[PublishSessionMessage],
- screenName: String,
- client: ActorRef[SessionEvent]): Behavior[SessionCommand] =
+ private def session(
+ room: ActorRef[PublishSessionMessage],
+ screenName: String,
+ client: ActorRef[SessionEvent]): Behavior[SessionCommand] =
Behaviors.receiveMessage {
case PostMessage(message) =>
// from client, publish to others via the room
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala
index e2c47f4810..2416f06b84 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala
@@ -172,10 +172,11 @@ object ActorSystem {
* Scala API: Creates a new actor system with the specified name and settings
* The core actor system settings are defined in [[BootstrapSetup]]
*/
- def apply[T](guardianBehavior: Behavior[T],
- name: String,
- setup: ActorSystemSetup,
- guardianProps: Props = Props.empty): ActorSystem[T] = {
+ def apply[T](
+ guardianBehavior: Behavior[T],
+ name: String,
+ setup: ActorSystemSetup,
+ guardianProps: Props = Props.empty): ActorSystem[T] = {
createInternal(name, guardianBehavior, guardianProps, setup)
}
@@ -217,10 +218,11 @@ object ActorSystem {
* which runs Akka Typed [[Behavior]] on an emulation layer. In this
* system typed and untyped actors can coexist.
*/
- private def createInternal[T](name: String,
- guardianBehavior: Behavior[T],
- guardianProps: Props,
- setup: ActorSystemSetup): ActorSystem[T] = {
+ private def createInternal[T](
+ name: String,
+ guardianBehavior: Behavior[T],
+ guardianProps: Props,
+ setup: ActorSystemSetup): ActorSystem[T] = {
Behavior.validateAsInitial(guardianBehavior)
require(Behavior.isAlive(guardianBehavior))
@@ -230,13 +232,13 @@ object ActorSystem {
val appConfig = bootstrapSettings.flatMap(_.config).getOrElse(ConfigFactory.load(cl))
val executionContext = bootstrapSettings.flatMap(_.defaultExecutionContext)
- val system = new untyped.ActorSystemImpl(name,
- appConfig,
- cl,
- executionContext,
- Some(
- PropsAdapter(() => guardianBehavior, guardianProps, isGuardian = true)),
- setup)
+ val system = new untyped.ActorSystemImpl(
+ name,
+ appConfig,
+ cl,
+ executionContext,
+ Some(PropsAdapter(() => guardianBehavior, guardianProps, isGuardian = true)),
+ setup)
system.start()
system.guardian ! GuardianActorAdapter.Start
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala
index 978a8069a8..34fb4644a8 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala
@@ -162,8 +162,9 @@ trait Extensions {
* implementation of the extension. Intended for tests that need to replace
* extension with stub/mock implementations.
*/
-abstract class ExtensionSetup[T <: Extension](val extId: ExtensionId[T],
- val createExtension: java.util.function.Function[ActorSystem[_], T])
+abstract class ExtensionSetup[T <: Extension](
+ val extId: ExtensionId[T],
+ val createExtension: java.util.function.Function[ActorSystem[_], T])
extends Setup
/**
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala
index ac69297a38..384e2c3c2e 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala
@@ -65,9 +65,10 @@ object SupervisorStrategy {
* random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay.
* In order to skip this additional delay pass in `0`.
*/
- def restartWithBackoff(minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): BackoffSupervisorStrategy =
+ def restartWithBackoff(
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): BackoffSupervisorStrategy =
Backoff(minBackoff, maxBackoff, randomFactor, resetBackoffAfter = minBackoff)
/**
@@ -97,9 +98,10 @@ object SupervisorStrategy {
* random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay.
* In order to skip this additional delay pass in `0`.
*/
- def restartWithBackoff(minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double): BackoffSupervisorStrategy =
+ def restartWithBackoff(
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double): BackoffSupervisorStrategy =
restartWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor)
/**
@@ -133,11 +135,12 @@ object SupervisorStrategy {
/**
* INTERNAL API
*/
- @InternalApi private[akka] final case class Restart(maxRestarts: Int,
- withinTimeRange: FiniteDuration,
- loggingEnabled: Boolean = true,
- stopChildren: Boolean = true,
- stashCapacity: Int = -1)
+ @InternalApi private[akka] final case class Restart(
+ maxRestarts: Int,
+ withinTimeRange: FiniteDuration,
+ loggingEnabled: Boolean = true,
+ stopChildren: Boolean = true,
+ stashCapacity: Int = -1)
extends RestartSupervisorStrategy
with RestartOrBackoff {
@@ -161,14 +164,15 @@ object SupervisorStrategy {
/**
* INTERNAL API
*/
- @InternalApi private[akka] final case class Backoff(minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- resetBackoffAfter: FiniteDuration,
- loggingEnabled: Boolean = true,
- maxRestarts: Int = -1,
- stopChildren: Boolean = true,
- stashCapacity: Int = -1)
+ @InternalApi private[akka] final case class Backoff(
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ resetBackoffAfter: FiniteDuration,
+ loggingEnabled: Boolean = true,
+ maxRestarts: Int = -1,
+ stopChildren: Boolean = true,
+ stashCapacity: Int = -1)
extends BackoffSupervisorStrategy
with RestartOrBackoff {
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala
index 133f2615af..3de4f64583 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala
@@ -91,11 +91,12 @@ import akka.util.JavaDurationConverters._
}
// Java API impl
- def ask[Req, Res](resClass: Class[Res],
- target: RecipientRef[Req],
- responseTimeout: Duration,
- createRequest: JFunction[ActorRef[Res], Req],
- applyToResponse: BiFunction[Res, Throwable, T]): Unit = {
+ def ask[Req, Res](
+ resClass: Class[Res],
+ target: RecipientRef[Req],
+ responseTimeout: Duration,
+ createRequest: JFunction[ActorRef[Res], Req],
+ applyToResponse: BiFunction[Res, Throwable, T]): Unit = {
import akka.actor.typed.javadsl.AskPattern
val message = new akka.japi.function.Function[ActorRef[Res], Req] {
def apply(ref: ActorRef[Res]): Req = createRequest(ref)
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala
index 2e09ab3195..e2ff7cc899 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala
@@ -23,14 +23,16 @@ import akka.actor.typed.scaladsl.{ ActorContext => SAC }
def widened[O, I](behavior: Behavior[I], matcher: PartialFunction[O, I]): Behavior[O] =
intercept(WidenedInterceptor(matcher))(behavior)
- class ReceiveBehavior[T](val onMessage: (SAC[T], T) => Behavior[T],
- onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] =
- Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
+ class ReceiveBehavior[T](
+ val onMessage: (SAC[T], T) => Behavior[T],
+ onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] =
+ Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
extends ExtensibleBehavior[T] {
override def receiveSignal(ctx: AC[T], msg: Signal): Behavior[T] =
- onSignal.applyOrElse((ctx.asScala, msg),
- Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
+ onSignal.applyOrElse(
+ (ctx.asScala, msg),
+ Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
override def receive(ctx: AC[T], msg: T) = onMessage(ctx.asScala, msg)
@@ -42,16 +44,18 @@ import akka.actor.typed.scaladsl.{ ActorContext => SAC }
* We implement it separately in order to be able to avoid wrapping each function in
* another function which drops the context parameter.
*/
- class ReceiveMessageBehavior[T](val onMessage: T => Behavior[T],
- onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] = Behavior.unhandledSignal
- .asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
+ class ReceiveMessageBehavior[T](
+ val onMessage: T => Behavior[T],
+ onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] =
+ Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
extends ExtensibleBehavior[T] {
override def receive(ctx: AC[T], msg: T) = onMessage(msg)
override def receiveSignal(ctx: AC[T], msg: Signal): Behavior[T] =
- onSignal.applyOrElse((ctx.asScala, msg),
- Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
+ onSignal.applyOrElse(
+ (ctx.asScala, msg),
+ Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]])
override def toString = s"ReceiveMessage(${LineNumbers(onMessage)})"
}
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala
index b96dd3c02a..c60ef577d3 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala
@@ -33,8 +33,9 @@ private[akka] object InterceptorImpl {
* INTERNAL API
*/
@InternalApi
-private[akka] final class InterceptorImpl[O, I](val interceptor: BehaviorInterceptor[O, I],
- val nestedBehavior: Behavior[I])
+private[akka] final class InterceptorImpl[O, I](
+ val interceptor: BehaviorInterceptor[O, I],
+ val nestedBehavior: Behavior[I])
extends ExtensibleBehavior[O]
with WrappingBehavior[O, I] {
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala
index 737e24b024..1fdd990616 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala
@@ -34,14 +34,16 @@ import akka.annotation.InternalApi
* and process stashed messages before stopping.
*/
@InternalApi private[akka] final class PoisonPillInterceptor[M] extends BehaviorInterceptor[M, M] {
- override def aroundReceive(ctx: TypedActorContext[M],
- msg: M,
- target: BehaviorInterceptor.ReceiveTarget[M]): Behavior[M] =
+ override def aroundReceive(
+ ctx: TypedActorContext[M],
+ msg: M,
+ target: BehaviorInterceptor.ReceiveTarget[M]): Behavior[M] =
target(ctx, msg)
- override def aroundSignal(ctx: TypedActorContext[M],
- signal: Signal,
- target: BehaviorInterceptor.SignalTarget[M]): Behavior[M] = {
+ override def aroundSignal(
+ ctx: TypedActorContext[M],
+ signal: Signal,
+ target: BehaviorInterceptor.SignalTarget[M]): Behavior[M] = {
signal match {
case p: PoisonPill =>
val next = target(ctx, p)
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala
index 24242af848..3bf2488f89 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala
@@ -33,9 +33,10 @@ import akka.util.ConstantFun
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class StashBufferImpl[T] private (val capacity: Int,
- private var _first: StashBufferImpl.Node[T],
- private var _last: StashBufferImpl.Node[T])
+@InternalApi private[akka] final class StashBufferImpl[T] private (
+ val capacity: Int,
+ private var _first: StashBufferImpl.Node[T],
+ private var _last: StashBufferImpl.Node[T])
extends javadsl.StashBuffer[T]
with scaladsl.StashBuffer[T] {
@@ -100,10 +101,11 @@ import akka.util.ConstantFun
override def unstashAll(ctx: javadsl.ActorContext[T], behavior: Behavior[T]): Behavior[T] =
unstashAll(ctx.asScala, behavior)
- override def unstash(ctx: scaladsl.ActorContext[T],
- behavior: Behavior[T],
- numberOfMessages: Int,
- wrap: T => T): Behavior[T] = {
+ override def unstash(
+ ctx: scaladsl.ActorContext[T],
+ behavior: Behavior[T],
+ numberOfMessages: Int,
+ wrap: T => T): Behavior[T] = {
if (isEmpty)
behavior // optimization
else {
@@ -115,9 +117,10 @@ import akka.util.ConstantFun
}
}
- private def interpretUnstashedMessages(behavior: Behavior[T],
- ctx: TypedActorContext[T],
- messages: Iterator[T]): Behavior[T] = {
+ private def interpretUnstashedMessages(
+ behavior: Behavior[T],
+ ctx: TypedActorContext[T],
+ messages: Iterator[T]): Behavior[T] = {
@tailrec def interpretOne(b: Behavior[T]): Behavior[T] = {
val b2 = Behavior.start(b, ctx)
if (!Behavior.isAlive(b2) || !messages.hasNext) b2
@@ -138,10 +141,11 @@ import akka.util.ConstantFun
interpretOne(Behavior.start(behavior, ctx))
}
- override def unstash(ctx: javadsl.ActorContext[T],
- behavior: Behavior[T],
- numberOfMessages: Int,
- wrap: JFunction[T, T]): Behavior[T] =
+ override def unstash(
+ ctx: javadsl.ActorContext[T],
+ behavior: Behavior[T],
+ numberOfMessages: Int,
+ wrap: JFunction[T, T]): Behavior[T] =
unstash(ctx.asScala, behavior, numberOfMessages, x => wrap.apply(x))
override def toString: String =
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala
index 91f2e20a26..67c921c431 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala
@@ -144,10 +144,11 @@ private object RestartSupervisor {
/**
* Calculates an exponential back off delay.
*/
- def calculateDelay(restartCount: Int,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): FiniteDuration = {
+ def calculateDelay(
+ restartCount: Int,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): FiniteDuration = {
val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor
if (restartCount >= 30) // Duration overflow protection (> 100 years)
maxBackoff
@@ -250,8 +251,9 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav
}
}
- override protected def handleExceptionOnStart(ctx: TypedActorContext[O],
- @unused target: PreStartTarget[T]): Catcher[Behavior[T]] = {
+ override protected def handleExceptionOnStart(
+ ctx: TypedActorContext[O],
+ @unused target: PreStartTarget[T]): Catcher[Behavior[T]] = {
case NonFatal(t) if isInstanceOfTheThrowableClass(t) =>
strategy match {
case _: Restart =>
@@ -267,15 +269,17 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav
}
}
- override protected def handleSignalException(ctx: TypedActorContext[O],
- target: SignalTarget[T]): Catcher[Behavior[T]] = {
+ override protected def handleSignalException(
+ ctx: TypedActorContext[O],
+ target: SignalTarget[T]): Catcher[Behavior[T]] = {
handleException(ctx, signalRestart = {
case e: UnstashException[O] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart)
case _ => target(ctx, PreRestart)
})
}
- override protected def handleReceiveException(ctx: TypedActorContext[O],
- target: ReceiveTarget[T]): Catcher[Behavior[T]] = {
+ override protected def handleReceiveException(
+ ctx: TypedActorContext[O],
+ target: ReceiveTarget[T]): Catcher[Behavior[T]] = {
handleException(ctx, signalRestart = {
case e: UnstashException[O] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart)
case _ => target.signalRestart(ctx)
@@ -335,9 +339,10 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav
strategy match {
case backoff: Backoff =>
gotScheduledRestart = false
- ctx.asScala.scheduleOnce(backoff.resetBackoffAfter,
- ctx.asScala.self.unsafeUpcast[Any],
- ResetRestartCount(restartCount, this))
+ ctx.asScala.scheduleOnce(
+ backoff.resetBackoffAfter,
+ ctx.asScala.self.unsafeUpcast[Any],
+ ResetRestartCount(restartCount, this))
case _: Restart =>
}
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala
index 209f5287ab..0d91471604 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala
@@ -132,10 +132,11 @@ import scala.concurrent.duration.FiniteDuration
OptionVal.Some(t.msg)
} else {
// it was from an old timer that was enqueued in mailbox before canceled
- log.debug("Received timer [{}] from old generation [{}], expected generation [{}], discarding",
- timerMsg.key,
- timerMsg.generation,
- t.generation)
+ log.debug(
+ "Received timer [{}] from old generation [{}], expected generation [{}], discarding",
+ timerMsg.key,
+ timerMsg.generation,
+ t.generation)
OptionVal.none // message should be ignored
}
}
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala
index f991fefd98..280432e113 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala
@@ -16,9 +16,10 @@ import scala.collection.immutable.HashMap
@InternalApi private[akka] object WithMdcBehaviorInterceptor {
val noMdcPerMessage = (_: Any) => Map.empty[String, Any]
- def apply[T](staticMdc: Map[String, Any],
- mdcForMessage: T => Map[String, Any],
- behavior: Behavior[T]): Behavior[T] = {
+ def apply[T](
+ staticMdc: Map[String, Any],
+ mdcForMessage: T => Map[String, Any],
+ behavior: Behavior[T]): Behavior[T] = {
val interceptor = new WithMdcBehaviorInterceptor[T](staticMdc, mdcForMessage)
BehaviorImpl.intercept(interceptor)(behavior)
@@ -31,8 +32,9 @@ import scala.collection.immutable.HashMap
*
* INTERNAL API
*/
-@InternalApi private[akka] final class WithMdcBehaviorInterceptor[T] private (staticMdc: Map[String, Any],
- mdcForMessage: T => Map[String, Any])
+@InternalApi private[akka] final class WithMdcBehaviorInterceptor[T] private (
+ staticMdc: Map[String, Any],
+ mdcForMessage: T => Map[String, Any])
extends BehaviorInterceptor[T, T] {
import BehaviorInterceptor._
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala
index fecc8a21b3..80fc5b3e22 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala
@@ -277,8 +277,9 @@ private[typed] class GuardianActorAdapter[T](_initialBehavior: Behavior[T]) exte
/**
* INTERNAL API
*/
-@InternalApi private[typed] final class ComposedStoppingBehavior[T](lastBehavior: Behavior[T],
- stopBehavior: StoppedBehavior[T])
+@InternalApi private[typed] final class ComposedStoppingBehavior[T](
+ lastBehavior: Behavior[T],
+ stopBehavior: StoppedBehavior[T])
extends ExtensibleBehavior[T] {
override def receive(ctx: TypedActorContext[T], msg: T): Behavior[T] =
throw new IllegalStateException("Stopping, should never receieve a message")
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala
index 740a936c1f..65bed16b88 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala
@@ -18,8 +18,9 @@ import scala.concurrent.duration._
/**
* INTERNAL API. Wrapping an [[akka.actor.ActorContext]] as an [[TypedActorContext]].
*/
-@InternalApi private[akka] final class ActorContextAdapter[T](val untypedContext: untyped.ActorContext,
- adapter: ActorAdapter[T])
+@InternalApi private[akka] final class ActorContextAdapter[T](
+ val untypedContext: untyped.ActorContext,
+ adapter: ActorAdapter[T])
extends ActorContextImpl[T] {
import ActorRefAdapter.toUntyped
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala
index 21b82a699f..60d805b859 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala
@@ -74,10 +74,11 @@ import akka.event.LoggingFilterWithMarker
}
override def dynamicAccess: untyped.DynamicAccess = untypedSystem.dynamicAccess
implicit override def executionContext: scala.concurrent.ExecutionContextExecutor = untypedSystem.dispatcher
- override val log: Logger = new LoggerAdapterImpl(untypedSystem.eventStream,
- getClass,
- name,
- LoggingFilterWithMarker.wrap(untypedSystem.logFilter))
+ override val log: Logger = new LoggerAdapterImpl(
+ untypedSystem.eventStream,
+ getClass,
+ name,
+ LoggingFilterWithMarker.wrap(untypedSystem.logFilter))
override def logConfiguration(): Unit = untypedSystem.logConfiguration()
override def name: String = untypedSystem.name
override def scheduler: akka.actor.Scheduler = untypedSystem.scheduler
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala
index 3eb2632b7c..98b1553084 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala
@@ -85,13 +85,14 @@ private[akka] abstract class AbstractLogger extends Logger {
if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3), OptionVal.Some(cause), OptionVal.Some(marker))
}
- override def error(marker: LogMarker,
- cause: Throwable,
- template: String,
- arg1: Any,
- arg2: Any,
- arg3: Any,
- arg4: Any): Unit = {
+ override def error(
+ marker: LogMarker,
+ cause: Throwable,
+ template: String,
+ arg1: Any,
+ arg2: Any,
+ arg3: Any,
+ arg4: Any): Unit = {
if (isErrorEnabled)
notifyError(format(template, arg1, arg2, arg3, arg4), OptionVal.Some(cause), OptionVal.Some(marker))
}
@@ -169,13 +170,14 @@ private[akka] abstract class AbstractLogger extends Logger {
notifyWarning(format(template, arg1, arg2, arg3), OptionVal.Some(cause), OptionVal.Some(marker))
}
- override def warning(marker: LogMarker,
- cause: Throwable,
- template: String,
- arg1: Any,
- arg2: Any,
- arg3: Any,
- arg4: Any): Unit = {
+ override def warning(
+ marker: LogMarker,
+ cause: Throwable,
+ template: String,
+ arg1: Any,
+ arg2: Any,
+ arg3: Any,
+ arg4: Any): Unit = {
if (isWarningEnabled)
notifyWarning(format(template, arg1, arg2, arg3, arg4), OptionVal.Some(cause), OptionVal.Some(marker))
}
@@ -321,13 +323,14 @@ private[akka] abstract class AbstractLogger extends Logger {
if (isLevelEnabled(level)) notify(level, format(template, arg1, arg2, arg3), OptionVal.Some(marker))
}
- override def log(level: LogLevel,
- marker: LogMarker,
- template: String,
- arg1: Any,
- arg2: Any,
- arg3: Any,
- arg4: Any): Unit = {
+ override def log(
+ level: LogLevel,
+ marker: LogMarker,
+ template: String,
+ arg1: Any,
+ arg2: Any,
+ arg3: Any,
+ arg4: Any): Unit = {
if (isLevelEnabled(level)) notify(level, format(template, arg1, arg2, arg3, arg4), OptionVal.Some(marker))
}
@@ -378,10 +381,11 @@ private[akka] abstract class AbstractLogger extends Logger {
* INTERNAL API
*/
@InternalApi
-private[akka] final class LoggerAdapterImpl(bus: LoggingBus,
- logClass: Class[_],
- logSource: String,
- loggingFilter: LoggingFilterWithMarker)
+private[akka] final class LoggerAdapterImpl(
+ bus: LoggingBus,
+ logClass: Class[_],
+ logSource: String,
+ loggingFilter: LoggingFilterWithMarker)
extends AbstractLogger {
override def isErrorEnabled = loggingFilter.isErrorEnabled(logClass, logSource)
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala
index 9f8ebc9744..8e231e168b 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala
@@ -13,9 +13,10 @@ import akka.annotation.InternalApi
* INTERNAL API
*/
@InternalApi private[akka] object PropsAdapter {
- def apply[T](behavior: () => Behavior[T],
- deploy: Props = Props.empty,
- isGuardian: Boolean = false): akka.actor.Props = {
+ def apply[T](
+ behavior: () => Behavior[T],
+ deploy: Props = Props.empty,
+ isGuardian: Boolean = false): akka.actor.Props = {
val props =
if (isGuardian)
akka.actor.Props(new GuardianActorAdapter(behavior()))
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala
index f692d851a2..a490043fef 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala
@@ -50,8 +50,9 @@ private[akka] object LocalReceptionist extends ReceptionistBehaviorProvider {
private def behavior(serviceRegistry: LocalServiceRegistry, subscriptions: SubscriptionRegistry): Behavior[Any] = {
// Helper to create new state
- def next(newRegistry: LocalServiceRegistry = serviceRegistry,
- newSubscriptions: SubscriptionRegistry = subscriptions) =
+ def next(
+ newRegistry: LocalServiceRegistry = serviceRegistry,
+ newSubscriptions: SubscriptionRegistry = subscriptions) =
behavior(newRegistry, newSubscriptions)
/*
@@ -69,8 +70,9 @@ private[akka] object LocalReceptionist extends ReceptionistBehaviorProvider {
})
// Helper that makes sure that subscribers are notified when an entry is changed
- def updateRegistry(changedKeysHint: Set[AbstractServiceKey],
- f: LocalServiceRegistry => LocalServiceRegistry): Behavior[Any] = {
+ def updateRegistry(
+ changedKeysHint: Set[AbstractServiceKey],
+ f: LocalServiceRegistry => LocalServiceRegistry): Behavior[Any] = {
val newRegistry = f(serviceRegistry)
def notifySubscribersFor[T](key: AbstractServiceKey): Unit = {
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala
index 4be59ccaac..db87c786ba 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala
@@ -22,9 +22,10 @@ private[akka] object ReceptionistMessages {
// of type erasure, more type safe factory methods for each message
// is the user API below while still hiding the type parameter so that
// users don't incorrectly match against it
- final case class Register[T] private[akka] (key: ServiceKey[T],
- serviceInstance: ActorRef[T],
- replyTo: Option[ActorRef[Receptionist.Registered]])
+ final case class Register[T] private[akka] (
+ key: ServiceKey[T],
+ serviceInstance: ActorRef[T],
+ replyTo: Option[ActorRef[Receptionist.Registered]])
extends Command
final case class Registered[T] private[akka] (key: ServiceKey[T], _serviceInstance: ActorRef[T])
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala
index 9abd3e9fbd..f8ee424cd4 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala
@@ -17,9 +17,9 @@ import akka.annotation.InternalApi
* INTERNAL API
*/
@InternalApi
-private[akka] final case class GroupRouterBuilder[T] private[akka] (key: ServiceKey[T],
- logicFactory: () => RoutingLogic[T] = () =>
- new RoutingLogics.RandomLogic[T]())
+private[akka] final case class GroupRouterBuilder[T] private[akka] (
+ key: ServiceKey[T],
+ logicFactory: () => RoutingLogic[T] = () => new RoutingLogics.RandomLogic[T]())
extends javadsl.GroupRouter[T]
with scaladsl.GroupRouter[T] {
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala
index 7d2264deba..b4e3512553 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala
@@ -13,10 +13,10 @@ import akka.annotation.InternalApi
* INTERNAL API
*/
@InternalApi
-private[akka] final case class PoolRouterBuilder[T](poolSize: Int,
- behavior: Behavior[T],
- logicFactory: () => RoutingLogic[T] = () =>
- new RoutingLogics.RoundRobinLogic[T])
+private[akka] final case class PoolRouterBuilder[T](
+ poolSize: Int,
+ behavior: Behavior[T],
+ logicFactory: () => RoutingLogic[T] = () => new RoutingLogics.RoundRobinLogic[T])
extends javadsl.PoolRouter[T]
with scaladsl.PoolRouter[T] {
if (poolSize < 1) throw new IllegalArgumentException(s"pool size must be positive, was $poolSize")
@@ -36,10 +36,11 @@ private[akka] final case class PoolRouterBuilder[T](poolSize: Int,
* INTERNAL API
*/
@InternalApi
-private final class PoolRouterImpl[T](ctx: ActorContext[T],
- poolSize: Int,
- behavior: Behavior[T],
- logic: RoutingLogic[T])
+private final class PoolRouterImpl[T](
+ ctx: ActorContext[T],
+ poolSize: Int,
+ behavior: Behavior[T],
+ logic: RoutingLogic[T])
extends AbstractBehavior[T] {
(1 to poolSize).foreach { _ =>
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala
index cce21eb5b7..7c83e4d06e 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala
@@ -281,11 +281,12 @@ trait ActorContext[T] extends TypedActorContext[T] {
* @tparam Req The request protocol, what the other actor accepts
* @tparam Res The response protocol, what the other actor sends back
*/
- def ask[Req, Res](resClass: Class[Res],
- target: RecipientRef[Req],
- responseTimeout: Duration,
- createRequest: java.util.function.Function[ActorRef[Res], Req],
- applyToResponse: BiFunction[Res, Throwable, T]): Unit
+ def ask[Req, Res](
+ resClass: Class[Res],
+ target: RecipientRef[Req],
+ responseTimeout: Duration,
+ createRequest: java.util.function.Function[ActorRef[Res], Req],
+ applyToResponse: BiFunction[Res, Throwable, T]): Unit
/**
* Sends the result of the given `CompletionStage` to this Actor (“`self`”), after adapted it with
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala
index c9821a4e42..0d22b2c33b 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala
@@ -30,9 +30,10 @@ import scala.compat.java8.FutureConverters._
*
*/
object AskPattern {
- def ask[T, U](actor: RecipientRef[T],
- message: JFunction[ActorRef[U], T],
- timeout: Duration,
- scheduler: Scheduler): CompletionStage[U] =
+ def ask[T, U](
+ actor: RecipientRef[T],
+ message: JFunction[ActorRef[U], T],
+ timeout: Duration,
+ scheduler: Scheduler): CompletionStage[U] =
(actor.ask(message.apply)(timeout.asScala, scheduler)).toJava
}
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala
index 77bec5dbed..cc0cdd2697 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala
@@ -52,9 +52,10 @@ final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signa
* @tparam M type of message to match
* @return a new behavior builder with the specified handling appended
*/
- def onMessage[M <: T](`type`: Class[M],
- test: JPredicate[M],
- handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
+ def onMessage[M <: T](
+ `type`: Class[M],
+ test: JPredicate[M],
+ handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
withMessage(OptionVal.Some(`type`), OptionVal.Some((t: T) => test.test(t.asInstanceOf[M])), handler)
/**
@@ -67,8 +68,9 @@ final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signa
* @param handler action to apply when the type matches
* @return a new behavior builder with the specified handling appended
*/
- def onMessageUnchecked[M <: T](`type`: Class[_ <: T],
- handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
+ def onMessageUnchecked[M <: T](
+ `type`: Class[_ <: T],
+ handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
withMessage[M](OptionVal.Some(`type`.asInstanceOf[Class[M]]), OptionVal.None, handler)
/**
@@ -79,11 +81,12 @@ final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signa
* @return a new behavior builder with the specified handling appended
*/
def onMessageEquals(msg: T, handler: JFunction[ActorContext[T], Behavior[T]]): BehaviorBuilder[T] =
- withMessage[T](OptionVal.Some(msg.getClass.asInstanceOf[Class[T]]),
- OptionVal.Some(_.equals(msg)),
- new JFunction2[ActorContext[T], T, Behavior[T]] {
- override def apply(ctx: ActorContext[T], msg: T): Behavior[T] = handler.apply(ctx)
- })
+ withMessage[T](
+ OptionVal.Some(msg.getClass.asInstanceOf[Class[T]]),
+ OptionVal.Some(_.equals(msg)),
+ new JFunction2[ActorContext[T], T, Behavior[T]] {
+ override def apply(ctx: ActorContext[T], msg: T): Behavior[T] = handler.apply(ctx)
+ })
/**
* Add a new case to the message handling matching any message. Subsequent `onMessage` clauses will
@@ -103,8 +106,9 @@ final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signa
* @tparam M type of signal to match
* @return a new behavior builder with the specified handling appended
*/
- def onSignal[M <: Signal](`type`: Class[M],
- handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
+ def onSignal[M <: Signal](
+ `type`: Class[M],
+ handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
withSignal(`type`, OptionVal.None, handler.asInstanceOf[JFunction2[ActorContext[T], Signal, Behavior[T]]])
/**
@@ -116,12 +120,14 @@ final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signa
* @tparam M type of signal to match
* @return a new behavior builder with the specified handling appended
*/
- def onSignal[M <: Signal](`type`: Class[M],
- test: JPredicate[M],
- handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
- withSignal(`type`,
- OptionVal.Some((t: Signal) => test.test(t.asInstanceOf[M])),
- handler.asInstanceOf[JFunction2[ActorContext[T], Signal, Behavior[T]]])
+ def onSignal[M <: Signal](
+ `type`: Class[M],
+ test: JPredicate[M],
+ handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] =
+ withSignal(
+ `type`,
+ OptionVal.Some((t: Signal) => test.test(t.asInstanceOf[M])),
+ handler.asInstanceOf[JFunction2[ActorContext[T], Signal, Behavior[T]]])
/**
* Add a new case to the signal handling matching equal signals.
@@ -137,18 +143,21 @@ final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signa
}
})
- private def withMessage[M <: T](clazz: OptionVal[Class[M]],
- test: OptionVal[M => Boolean],
- handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = {
+ private def withMessage[M <: T](
+ clazz: OptionVal[Class[M]],
+ test: OptionVal[M => Boolean],
+ handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = {
val newCase = Case(clazz, test, handler)
new BehaviorBuilder[T](newCase.asInstanceOf[Case[T, T]] +: messageHandlers, signalHandlers)
}
- private def withSignal[M <: Signal](`type`: Class[M],
- test: OptionVal[Signal => Boolean],
- handler: JFunction2[ActorContext[T], Signal, Behavior[T]]): BehaviorBuilder[T] = {
- new BehaviorBuilder[T](messageHandlers,
- Case(OptionVal.Some(`type`), test, handler).asInstanceOf[Case[T, Signal]] +: signalHandlers)
+ private def withSignal[M <: Signal](
+ `type`: Class[M],
+ test: OptionVal[Signal => Boolean],
+ handler: JFunction2[ActorContext[T], Signal, Behavior[T]]): BehaviorBuilder[T] = {
+ new BehaviorBuilder[T](
+ messageHandlers,
+ Case(OptionVal.Some(`type`), test, handler).asInstanceOf[Case[T, Signal]] +: signalHandlers)
}
}
@@ -159,9 +168,10 @@ object BehaviorBuilder {
// used for both matching signals and messages so we throw away types after they are enforced by the builder API above
/** INTERNAL API */
@InternalApi
- private[javadsl] final case class Case[BT, MT](`type`: OptionVal[Class[_ <: MT]],
- test: OptionVal[MT => Boolean],
- handler: JFunction2[ActorContext[BT], MT, Behavior[BT]])
+ private[javadsl] final case class Case[BT, MT](
+ `type`: OptionVal[Class[_ <: MT]],
+ test: OptionVal[MT => Boolean],
+ handler: JFunction2[ActorContext[BT], MT, Behavior[BT]])
/**
* @return new empty immutable behavior builder.
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala
index 20bf915754..262d277dad 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala
@@ -136,8 +136,9 @@ object Behaviors {
* that can potentially be different from this one. State is maintained by returning
* a new behavior that holds the new immutable state.
*/
- def receive[T](onMessage: JapiFunction2[ActorContext[T], T, Behavior[T]],
- onSignal: JapiFunction2[ActorContext[T], Signal, Behavior[T]]): Behavior[T] = {
+ def receive[T](
+ onMessage: JapiFunction2[ActorContext[T], T, Behavior[T]],
+ onSignal: JapiFunction2[ActorContext[T], Signal, Behavior[T]]): Behavior[T] = {
new BehaviorImpl.ReceiveBehavior((ctx, msg) => onMessage.apply(ctx.asJava, msg), {
case (ctx, sig) => onSignal.apply(ctx.asJava, sig)
})
@@ -298,8 +299,9 @@ object Behaviors {
*
* See also [[akka.actor.typed.Logger.withMdc]]
*/
- def withMdc[T](mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]],
- behavior: Behavior[T]): Behavior[T] =
+ def withMdc[T](
+ mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]],
+ behavior: Behavior[T]): Behavior[T] =
withMdc(Collections.emptyMap[String, Any], mdcForMessage, behavior)
/**
@@ -331,9 +333,10 @@ object Behaviors {
*
* See also [[akka.actor.typed.Logger.withMdc]]
*/
- def withMdc[T](staticMdc: java.util.Map[String, Any],
- mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]],
- behavior: Behavior[T]): Behavior[T] = {
+ def withMdc[T](
+ staticMdc: java.util.Map[String, Any],
+ mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]],
+ behavior: Behavior[T]): Behavior[T] = {
def asScalaMap(m: java.util.Map[String, Any]): Map[String, Any] = {
if (m == null || m.isEmpty) Map.empty[String, Any]
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala
index 7890ca9e44..109c16f38b 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala
@@ -20,8 +20,9 @@ import akka.util.OptionVal
*
* @tparam T the common superclass of all supported messages.
*/
-final class ReceiveBuilder[T] private (private var messageHandlers: List[ReceiveBuilder.Case[T, T]],
- private var signalHandlers: List[ReceiveBuilder.Case[T, Signal]]) {
+final class ReceiveBuilder[T] private (
+ private var messageHandlers: List[ReceiveBuilder.Case[T, T]],
+ private var signalHandlers: List[ReceiveBuilder.Case[T, Signal]]) {
import ReceiveBuilder.Case
@@ -108,9 +109,10 @@ final class ReceiveBuilder[T] private (private var messageHandlers: List[Receive
* @tparam M type of signal to match
* @return this behavior builder
*/
- def onSignal[M <: Signal](`type`: Class[M],
- test: JPredicate[M],
- handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] =
+ def onSignal[M <: Signal](
+ `type`: Class[M],
+ test: JPredicate[M],
+ handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] =
withSignal(`type`, OptionVal.Some(test), handler)
/**
@@ -127,16 +129,18 @@ final class ReceiveBuilder[T] private (private var messageHandlers: List[Receive
override def apply(param: Signal): Behavior[T] = handler.create()
})
- private def withMessage[M <: T](`type`: OptionVal[Class[M]],
- test: OptionVal[JPredicate[M]],
- handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = {
+ private def withMessage[M <: T](
+ `type`: OptionVal[Class[M]],
+ test: OptionVal[JPredicate[M]],
+ handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = {
messageHandlers = Case[T, M](`type`, test, handler).asInstanceOf[Case[T, T]] +: messageHandlers
this
}
- private def withSignal[M <: Signal](`type`: Class[M],
- test: OptionVal[JPredicate[M]],
- handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = {
+ private def withSignal[M <: Signal](
+ `type`: Class[M],
+ test: OptionVal[JPredicate[M]],
+ handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = {
signalHandlers = Case[T, M](OptionVal.Some(`type`), test, handler).asInstanceOf[Case[T, Signal]] +: signalHandlers
this
}
@@ -149,9 +153,10 @@ object ReceiveBuilder {
/** INTERNAL API */
@InternalApi
- private[javadsl] final case class Case[BT, MT](`type`: OptionVal[Class[_ <: MT]],
- test: OptionVal[JPredicate[MT]],
- handler: JFunction[MT, Behavior[BT]])
+ private[javadsl] final case class Case[BT, MT](
+ `type`: OptionVal[Class[_ <: MT]],
+ test: OptionVal[JPredicate[MT]],
+ handler: JFunction[MT, Behavior[BT]])
}
@@ -161,8 +166,9 @@ object ReceiveBuilder {
* INTERNAL API
*/
@InternalApi
-private final class BuiltReceive[T](messageHandlers: List[ReceiveBuilder.Case[T, T]],
- signalHandlers: List[ReceiveBuilder.Case[T, Signal]])
+private final class BuiltReceive[T](
+ messageHandlers: List[ReceiveBuilder.Case[T, T]],
+ signalHandlers: List[ReceiveBuilder.Case[T, Signal]])
extends Receive[T] {
import ReceiveBuilder.Case
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala
index a8b2463256..206f086fa0 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala
@@ -42,9 +42,10 @@ abstract class Receptionist extends Extension {
.getObjectFor[ReceptionistBehaviorProvider]("akka.cluster.typed.internal.receptionist.ClusterReceptionist")
.recover {
case e =>
- throw new RuntimeException("ClusterReceptionist could not be loaded dynamically. Make sure you have " +
- "'akka-cluster-typed' in the classpath.",
- e)
+ throw new RuntimeException(
+ "ClusterReceptionist could not be loaded dynamically. Make sure you have " +
+ "'akka-cluster-typed' in the classpath.",
+ e)
}
.get
} else LocalReceptionist
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala
index 45b27af458..6ce41cd0ad 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala
@@ -111,14 +111,16 @@ object AskPattern {
// Note: _promiseRef mustn't have a type pattern, since it can be null
private[this] val (_ref: ActorRef[U], _future: Future[U], _promiseRef) =
if (target.isTerminated)
- (adapt.ActorRefAdapter[U](target.provider.deadLetters),
- Future.failed[U](new TimeoutException(s"Recipient[$target] had already been terminated.")),
- null)
+ (
+ adapt.ActorRefAdapter[U](target.provider.deadLetters),
+ Future.failed[U](new TimeoutException(s"Recipient[$target] had already been terminated.")),
+ null)
else if (timeout.duration.length <= 0)
- (adapt.ActorRefAdapter[U](target.provider.deadLetters),
- Future.failed[U](
- new IllegalArgumentException(s"Timeout length must be positive, question not sent to [$target]")),
- null)
+ (
+ adapt.ActorRefAdapter[U](target.provider.deadLetters),
+ Future.failed[U](
+ new IllegalArgumentException(s"Timeout length must be positive, question not sent to [$target]")),
+ null)
else {
// messageClassName "unknown' is set later, after applying the message factory
val a = PromiseActorRef(target.provider, timeout, target, "unknown", onTimeout = onTimeout)
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala
index 9813a9d17a..101d38cf97 100644
--- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala
+++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala
@@ -58,9 +58,10 @@ package object adapter {
/**
* INTERNAL API
*/
- @InternalApi private[akka] def internalSystemActorOf[U](behavior: Behavior[U],
- name: String,
- props: Props): ActorRef[U] = {
+ @InternalApi private[akka] def internalSystemActorOf[U](
+ behavior: Behavior[U],
+ name: String,
+ props: Props): ActorRef[U] = {
toUntyped.asInstanceOf[ExtendedActorSystem].systemActorOf(PropsAdapter(behavior, props), name)
}
}
diff --git a/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala b/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala
index 2d7dbdd138..2b2743782e 100644
--- a/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala
+++ b/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala
@@ -559,9 +559,10 @@ object ByteString {
else if (remainingToDrop == 0)
new ByteStrings(bytestrings.dropRight(fullDrops), length - n)
else
- new ByteStrings(bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1)
- .dropRight1(remainingToDrop),
- length - n)
+ new ByteStrings(
+ bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1)
+ .dropRight1(remainingToDrop),
+ length - n)
} else {
dropRightWithFullDropsAndRemainig(fullDrops + 1, remainingToDrop - bs.length)
}
diff --git a/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala b/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala
index 1823ec0828..1b7b8cd144 100644
--- a/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala
+++ b/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala
@@ -553,9 +553,10 @@ object ByteString {
else if (remainingToDrop == 0)
new ByteStrings(bytestrings.dropRight(fullDrops), length - n)
else
- new ByteStrings(bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1)
- .dropRight1(remainingToDrop),
- length - n)
+ new ByteStrings(
+ bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1)
+ .dropRight1(remainingToDrop),
+ length - n)
} else {
dropRightWithFullDropsAndRemainig(fullDrops + 1, remainingToDrop - bs.length)
}
diff --git a/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala b/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala
index 2b6be7e8d9..2bc879c3cc 100644
--- a/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala
+++ b/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala
@@ -51,9 +51,9 @@ package object ccompat {
fact: GenericCompanion[CC]): CanBuildFrom[Any, A, CC[A]] =
simpleCBF(fact.newBuilder[A])
- private[akka] implicit def sortedSetCompanionToCBF[A: Ordering,
- CC[X] <: c.SortedSet[X] with c.SortedSetLike[X, CC[X]]](
- fact: SortedSetFactory[CC]): CanBuildFrom[Any, A, CC[A]] =
+ private[akka] implicit def sortedSetCompanionToCBF[
+ A: Ordering,
+ CC[X] <: c.SortedSet[X] with c.SortedSetLike[X, CC[X]]](fact: SortedSetFactory[CC]): CanBuildFrom[Any, A, CC[A]] =
simpleCBF(fact.newBuilder[A])
private[ccompat] def build[T, CC](builder: m.Builder[T, CC], source: TraversableOnce[T]): CC = {
diff --git a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala
index 092f359113..7e8eaff730 100644
--- a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala
+++ b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala
@@ -93,9 +93,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param stateTimeout default state timeout for this state
* @param stateFunctionBuilder partial function builder describing response to input
*/
- final def when(stateName: S,
- stateTimeout: FiniteDuration,
- stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit =
+ final def when(
+ stateName: S,
+ stateTimeout: FiniteDuration,
+ stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit =
super.when(stateName, stateTimeout)(stateFunctionBuilder.build())
/**
@@ -108,9 +109,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param stateTimeout default state timeout for this state
* @param stateFunctionBuilder partial function builder describing response to input
*/
- final def when(stateName: S,
- stateTimeout: java.time.Duration,
- stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = {
+ final def when(
+ stateName: S,
+ stateTimeout: java.time.Duration,
+ stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = {
import JavaDurationConverters._
when(stateName, stateTimeout.asScala, stateFunctionBuilder)
}
@@ -199,10 +201,11 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[ET, DT <: D](eventType: Class[ET],
- dataType: Class[DT],
- predicate: TypedPredicate2[ET, DT],
- apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D] =
+ final def matchEvent[ET, DT <: D](
+ eventType: Class[ET],
+ dataType: Class[DT],
+ predicate: TypedPredicate2[ET, DT],
+ apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D] =
new FSMStateFunctionBuilder[S, D]().event(eventType, dataType, predicate, apply)
/**
@@ -215,9 +218,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[ET, DT <: D](eventType: Class[ET],
- dataType: Class[DT],
- apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D] =
+ final def matchEvent[ET, DT <: D](
+ eventType: Class[ET],
+ dataType: Class[DT],
+ apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D] =
new FSMStateFunctionBuilder[S, D]().event(eventType, dataType, apply)
/**
@@ -230,9 +234,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[ET](eventType: Class[ET],
- predicate: TypedPredicate2[ET, D],
- apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D] =
+ final def matchEvent[ET](
+ eventType: Class[ET],
+ predicate: TypedPredicate2[ET, D],
+ apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D] =
new FSMStateFunctionBuilder[S, D]().event(eventType, predicate, apply)
/**
@@ -256,8 +261,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent(predicate: TypedPredicate2[AnyRef, D],
- apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D] =
+ final def matchEvent(
+ predicate: TypedPredicate2[AnyRef, D],
+ apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D] =
new FSMStateFunctionBuilder[S, D]().event(predicate, apply)
/**
@@ -271,9 +277,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[DT <: D](eventMatches: JList[AnyRef],
- dataType: Class[DT],
- apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D] =
+ final def matchEvent[DT <: D](
+ eventMatches: JList[AnyRef],
+ dataType: Class[DT],
+ apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D] =
new FSMStateFunctionBuilder[S, D]().event(eventMatches, dataType, apply)
/**
@@ -299,9 +306,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEventEquals[E, DT <: D](event: E,
- dataType: Class[DT],
- apply: Apply2[E, DT, State]): FSMStateFunctionBuilder[S, D] =
+ final def matchEventEquals[E, DT <: D](
+ event: E,
+ dataType: Class[DT],
+ apply: Apply2[E, DT, State]): FSMStateFunctionBuilder[S, D] =
new FSMStateFunctionBuilder[S, D]().eventEquals(event, dataType, apply)
/**
@@ -387,9 +395,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param predicate a predicate that will be evaluated on the reason if the type matches
* @return the builder with the case statement added
*/
- final def matchStop[RT <: Reason](reasonType: Class[RT],
- predicate: TypedPredicate[RT],
- apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] =
+ final def matchStop[RT <: Reason](
+ reasonType: Class[RT],
+ predicate: TypedPredicate[RT],
+ apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] =
new FSMStopBuilder[S, D]().stop(reasonType, predicate, apply)
/**
@@ -410,9 +419,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param apply an action to apply to the argument if the type and predicate matches
* @return a builder with the case statement added
*/
- final def matchData[DT <: D](dataType: Class[DT],
- predicate: TypedPredicate[DT],
- apply: UnitApply[DT]): UnitPFBuilder[D] =
+ final def matchData[DT <: D](
+ dataType: Class[DT],
+ predicate: TypedPredicate[DT],
+ apply: UnitApply[DT]): UnitPFBuilder[D] =
UnitMatch.`match`(dataType, predicate, apply)
/**
diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index 1e300c1ad1..fdd807e004 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -116,8 +116,9 @@ final case class ActorIdentity(correlationId: Any, ref: Option[ActorRef]) {
* that the remote node hosting the watched actor was detected as unreachable
*/
@SerialVersionUID(1L)
-final case class Terminated private[akka] (@BeanProperty actor: ActorRef)(@BeanProperty val existenceConfirmed: Boolean,
- @BeanProperty val addressTerminated: Boolean)
+final case class Terminated private[akka] (@BeanProperty actor: ActorRef)(
+ @BeanProperty val existenceConfirmed: Boolean,
+ @BeanProperty val addressTerminated: Boolean)
extends AutoReceivedMessage
with PossiblyHarmful
with DeadLetterSuppression
@@ -217,10 +218,11 @@ object ActorInitializationException {
* @param messageOption is the message which was optionally passed into preRestart()
*/
@SerialVersionUID(1L)
-final case class PreRestartException private[akka] (actor: ActorRef,
- cause: Throwable,
- originalCause: Throwable,
- messageOption: Option[Any])
+final case class PreRestartException private[akka] (
+ actor: ActorRef,
+ cause: Throwable,
+ originalCause: Throwable,
+ messageOption: Option[Any])
extends ActorInitializationException(
actor,
"exception in preRestart(" +
@@ -288,9 +290,10 @@ class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaExc
* This message is published to the EventStream whenever an Actor receives a message it doesn't understand
*/
@SerialVersionUID(1L)
-final case class UnhandledMessage(@BeanProperty message: Any,
- @BeanProperty sender: ActorRef,
- @BeanProperty recipient: ActorRef)
+final case class UnhandledMessage(
+ @BeanProperty message: Any,
+ @BeanProperty sender: ActorRef,
+ @BeanProperty recipient: ActorRef)
extends NoSerializationVerificationNeeded
/**
diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala
index abbcf19e83..9653b256d0 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala
@@ -706,9 +706,10 @@ private[akka] class ActorCell(
publish(Debug(self.path.toString, clazz(actor), "now supervising " + child))
case None =>
publish(
- Error(self.path.toString,
- clazz(actor),
- "received Supervise from unregistered child " + child + ", this will not end well"))
+ Error(
+ self.path.toString,
+ clazz(actor),
+ "received Supervise from unregistered child " + child + ", this will not end well"))
}
}
diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala
index 1a869d8801..599bc0a0d2 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala
@@ -257,9 +257,10 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable {
*/
@SerialVersionUID(1L)
final case class RootActorPath(address: Address, name: String = "/") extends ActorPath {
- require(name.length == 1 || name.indexOf('/', 1) == -1,
- ("/ may only exist at the beginning of the root actors name, " +
- "it is a path separator and is not legal in ActorPath names: [%s]").format(name))
+ require(
+ name.length == 1 || name.indexOf('/', 1) == -1,
+ ("/ may only exist at the beginning of the root actors name, " +
+ "it is a path separator and is not legal in ActorPath names: [%s]").format(name))
require(name.indexOf('#') == -1, "# is a fragment separator and is not legal in ActorPath names: [%s]".format(name))
override def parent: ActorPath = this
@@ -393,10 +394,11 @@ final class ChildActorPath private[akka] (val parent: ActorPath, val name: Strin
* @param diff difference in offset for each child element, due to different address
* @param rootString function to construct the root element string
*/
- private def buildToString(sb: JStringBuilder,
- length: Int,
- diff: Int,
- rootString: RootActorPath => String): JStringBuilder = {
+ private def buildToString(
+ sb: JStringBuilder,
+ length: Int,
+ diff: Int,
+ rootString: RootActorPath => String): JStringBuilder = {
@tailrec
def rec(p: ActorPath): JStringBuilder = p match {
case r: RootActorPath =>
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
index 7441264b05..5303f47a78 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
@@ -299,12 +299,13 @@ private[akka] case object Nobody extends MinimalActorRef {
*
* INTERNAL API
*/
-private[akka] class LocalActorRef private[akka] (_system: ActorSystemImpl,
- _props: Props,
- _dispatcher: MessageDispatcher,
- _mailboxType: MailboxType,
- _supervisor: InternalActorRef,
- override val path: ActorPath)
+private[akka] class LocalActorRef private[akka] (
+ _system: ActorSystemImpl,
+ _props: Props,
+ _dispatcher: MessageDispatcher,
+ _mailboxType: MailboxType,
+ _supervisor: InternalActorRef,
+ override val path: ActorPath)
extends ActorRefWithCell
with LocalRef {
@@ -321,11 +322,12 @@ private[akka] class LocalActorRef private[akka] (_system: ActorSystemImpl,
private val actorCell: ActorCell = newActorCell(_system, this, _props, _dispatcher, _supervisor)
actorCell.init(sendSupervise = true, _mailboxType)
- protected def newActorCell(system: ActorSystemImpl,
- ref: InternalActorRef,
- props: Props,
- dispatcher: MessageDispatcher,
- supervisor: InternalActorRef): ActorCell =
+ protected def newActorCell(
+ system: ActorSystemImpl,
+ ref: InternalActorRef,
+ props: Props,
+ dispatcher: MessageDispatcher,
+ supervisor: InternalActorRef): ActorCell =
new ActorCell(system, ref, props, dispatcher, supervisor)
protected def actorContext: ActorContext = actorCell
@@ -524,9 +526,10 @@ private[akka] object DeadLetterActorRef {
*
* INTERNAL API
*/
-private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider,
- override val path: ActorPath,
- val eventStream: EventStream)
+private[akka] class EmptyLocalActorRef(
+ override val provider: ActorRefProvider,
+ override val path: ActorPath,
+ val eventStream: EventStream)
extends MinimalActorRef {
@deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2")
@@ -607,10 +610,11 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: Actor
*
* INTERNAL API
*/
-private[akka] class VirtualPathContainer(override val provider: ActorRefProvider,
- override val path: ActorPath,
- override val getParent: InternalActorRef,
- val log: MarkerLoggingAdapter)
+private[akka] class VirtualPathContainer(
+ override val provider: ActorRefProvider,
+ override val path: ActorPath,
+ override val getParent: InternalActorRef,
+ val log: MarkerLoggingAdapter)
extends MinimalActorRef {
private val children = new ConcurrentHashMap[String, InternalActorRef]
@@ -624,9 +628,10 @@ private[akka] class VirtualPathContainer(override val provider: ActorRefProvider
require(elements.nonEmpty)
def emptyRef =
- new EmptyLocalActorRef(provider,
- path / sel.elements.map(_.toString),
- provider.systemGuardian.underlying.system.eventStream)
+ new EmptyLocalActorRef(
+ provider,
+ path / sel.elements.map(_.toString),
+ provider.systemGuardian.underlying.system.eventStream)
elements.head match {
case SelectChildName(name) =>
@@ -717,10 +722,11 @@ private[akka] class VirtualPathContainer(override val provider: ActorRefProvider
* [[FunctionRef#unwatch]] must be called to avoid a resource leak, which is different
* from an ordinary actor.
*/
-private[akka] final class FunctionRef(override val path: ActorPath,
- override val provider: ActorRefProvider,
- system: ActorSystem,
- f: (ActorRef, Any) => Unit)
+private[akka] final class FunctionRef(
+ override val path: ActorPath,
+ override val provider: ActorRefProvider,
+ system: ActorSystem,
+ f: (ActorRef, Any) => Unit)
extends MinimalActorRef {
override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = {
@@ -827,9 +833,10 @@ private[akka] final class FunctionRef(override val path: ActorPath,
}
} else if (!watcheeSelf && watcherSelf) {
publish(
- Logging.Warning(path.toString,
- classOf[FunctionRef],
- s"externally triggered watch from $watcher to $watchee is illegal on FunctionRef"))
+ Logging.Warning(
+ path.toString,
+ classOf[FunctionRef],
+ s"externally triggered watch from $watcher to $watchee is illegal on FunctionRef"))
} else {
publish(
Logging.Error(path.toString, classOf[FunctionRef], s"BUG: illegal Watch($watchee,$watcher) for $this"))
@@ -859,9 +866,10 @@ private[akka] final class FunctionRef(override val path: ActorPath,
}
} else if (!watcheeSelf && watcherSelf) {
publish(
- Logging.Warning(path.toString,
- classOf[FunctionRef],
- s"externally triggered unwatch from $watcher to $watchee is illegal on FunctionRef"))
+ Logging.Warning(
+ path.toString,
+ classOf[FunctionRef],
+ s"externally triggered unwatch from $watcher to $watchee is illegal on FunctionRef"))
} else {
publish(
Logging.Error(path.toString, classOf[FunctionRef], s"BUG: illegal Unwatch($watchee,$watcher) for $this"))
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
index 5fd9d54a35..4ff3e2a120 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
@@ -111,14 +111,15 @@ import akka.util.OptionVal
* but it should be overridable from external configuration; the lookup of
* the latter can be suppressed by setting ``lookupDeploy`` to ``false``.
*/
- private[akka] def actorOf(system: ActorSystemImpl,
- props: Props,
- supervisor: InternalActorRef,
- path: ActorPath,
- systemService: Boolean,
- deploy: Option[Deploy],
- lookupDeploy: Boolean,
- async: Boolean): InternalActorRef
+ private[akka] def actorOf(
+ system: ActorSystemImpl,
+ props: Props,
+ supervisor: InternalActorRef,
+ path: ActorPath,
+ systemService: Boolean,
+ deploy: Option[Deploy],
+ lookupDeploy: Boolean,
+ async: Boolean): InternalActorRef
/**
* INTERNAL API
@@ -475,19 +476,21 @@ private[akka] object LocalActorRefProvider {
*
* Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported.
*/
-private[akka] class LocalActorRefProvider private[akka] (_systemName: String,
- override val settings: ActorSystem.Settings,
- val eventStream: EventStream,
- val dynamicAccess: DynamicAccess,
- override val deployer: Deployer,
- _deadLetters: Option[ActorPath => InternalActorRef])
+private[akka] class LocalActorRefProvider private[akka] (
+ _systemName: String,
+ override val settings: ActorSystem.Settings,
+ val eventStream: EventStream,
+ val dynamicAccess: DynamicAccess,
+ override val deployer: Deployer,
+ _deadLetters: Option[ActorPath => InternalActorRef])
extends ActorRefProvider {
// this is the constructor needed for reflectively instantiating the provider
- def this(_systemName: String,
- settings: ActorSystem.Settings,
- eventStream: EventStream,
- dynamicAccess: DynamicAccess) =
+ def this(
+ _systemName: String,
+ settings: ActorSystem.Settings,
+ eventStream: EventStream,
+ dynamicAccess: DynamicAccess) =
this(_systemName, settings, eventStream, dynamicAccess, new Deployer(settings, dynamicAccess), None)
override val rootPath: ActorPath = RootActorPath(Address("akka", _systemName))
@@ -605,12 +608,13 @@ private[akka] class LocalActorRefProvider private[akka] (_systemName: String,
private lazy val defaultMailbox = system.mailboxes.lookup(Mailboxes.DefaultMailboxId)
override lazy val rootGuardian: LocalActorRef =
- new LocalActorRef(system,
- Props(classOf[LocalActorRefProvider.Guardian], rootGuardianStrategy),
- defaultDispatcher,
- defaultMailbox,
- theOneWhoWalksTheBubblesOfSpaceTime,
- rootPath) {
+ new LocalActorRef(
+ system,
+ Props(classOf[LocalActorRefProvider.Guardian], rootGuardianStrategy),
+ defaultDispatcher,
+ defaultMailbox,
+ theOneWhoWalksTheBubblesOfSpaceTime,
+ rootPath) {
override def getParent: InternalActorRef = this
override def getSingleChild(name: String): InternalActorRef = name match {
case "temp" => tempContainer
@@ -641,12 +645,13 @@ private[akka] class LocalActorRefProvider private[akka] (_systemName: String,
override lazy val systemGuardian: LocalActorRef = {
val cell = rootGuardian.underlying
cell.reserveChild("system")
- val ref = new LocalActorRef(system,
- Props(classOf[LocalActorRefProvider.SystemGuardian], systemGuardianStrategy, guardian),
- defaultDispatcher,
- defaultMailbox,
- rootGuardian,
- rootPath / "system")
+ val ref = new LocalActorRef(
+ system,
+ Props(classOf[LocalActorRefProvider.SystemGuardian], systemGuardianStrategy, guardian),
+ defaultDispatcher,
+ defaultMailbox,
+ rootGuardian,
+ rootPath / "system")
cell.initChild(ref)
ref.start()
ref
@@ -718,9 +723,10 @@ private[akka] class LocalActorRefProvider private[akka] (_systemName: String,
def resolveActorRef(path: ActorPath): ActorRef = {
if (path.root == rootPath) resolveActorRef(rootGuardian, path.elements)
else {
- log.debug("Resolve (deserialization) of foreign path [{}] doesn't match root path [{}], using deadLetters.",
- path,
- rootPath)
+ log.debug(
+ "Resolve (deserialization) of foreign path [{}] doesn't match root path [{}], using deadLetters.",
+ path,
+ rootPath)
deadLetters
}
}
@@ -736,21 +742,23 @@ private[akka] class LocalActorRefProvider private[akka] (_systemName: String,
ref.getChild(pathElements.iterator) match {
case Nobody =>
if (log.isDebugEnabled)
- log.debug("Resolve (deserialization) of path [{}] doesn't match an active actor. " +
- "It has probably been stopped, using deadLetters.",
- pathElements.mkString("/"))
+ log.debug(
+ "Resolve (deserialization) of path [{}] doesn't match an active actor. " +
+ "It has probably been stopped, using deadLetters.",
+ pathElements.mkString("/"))
new EmptyLocalActorRef(system.provider, ref.path / pathElements, eventStream)
case x => x
}
- def actorOf(system: ActorSystemImpl,
- props: Props,
- supervisor: InternalActorRef,
- path: ActorPath,
- systemService: Boolean,
- deploy: Option[Deploy],
- lookupDeploy: Boolean,
- async: Boolean): InternalActorRef = {
+ def actorOf(
+ system: ActorSystemImpl,
+ props: Props,
+ supervisor: InternalActorRef,
+ path: ActorPath,
+ systemService: Boolean,
+ deploy: Option[Deploy],
+ lookupDeploy: Boolean,
+ async: Boolean): InternalActorRef = {
props.deploy.routerConfig match {
case NoRouter =>
if (settings.DebugRouterMisconfiguration) {
@@ -803,9 +811,10 @@ private[akka] class LocalActorRefProvider private[akka] (_systemName: String,
if (!system.dispatchers.hasDispatcher(r.routerDispatcher))
throw new ConfigurationException(s"Dispatcher [${p.dispatcher}] not configured for router of $path")
- val routerProps = Props(p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher),
- classOf[RoutedActorCell.RouterActorCreator],
- Vector(p.routerConfig))
+ val routerProps = Props(
+ p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher),
+ classOf[RoutedActorCell.RouterActorCreator],
+ Vector(p.routerConfig))
val routeeProps = p.withRouter(NoRouter)
try {
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
index 6008b943e4..243bc33869 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
@@ -43,9 +43,10 @@ abstract class ActorSelection extends Serializable {
* Pass [[ActorRef#noSender]] or `null` as sender if there is nobody to reply to
*/
def tell(msg: Any, sender: ActorRef): Unit =
- ActorSelection.deliverSelection(anchor.asInstanceOf[InternalActorRef],
- sender,
- ActorSelectionMessage(msg, path, wildcardFanOut = false))
+ ActorSelection.deliverSelection(
+ anchor.asInstanceOf[InternalActorRef],
+ sender,
+ ActorSelectionMessage(msg, path, wildcardFanOut = false))
/**
* Forwards the message and passes the original sender actor as the sender.
@@ -235,9 +236,10 @@ object ActorSelection {
ref match {
case refWithCell: ActorRefWithCell =>
def emptyRef =
- new EmptyLocalActorRef(refWithCell.provider,
- anchor.path / sel.elements.map(_.toString),
- refWithCell.underlying.system.eventStream)
+ new EmptyLocalActorRef(
+ refWithCell.provider,
+ anchor.path / sel.elements.map(_.toString),
+ refWithCell.underlying.system.eventStream)
iter.next() match {
case SelectParent =>
@@ -271,8 +273,9 @@ object ActorSelection {
if (matchingChildren.isEmpty && !sel.wildcardFanOut)
emptyRef.tell(sel, sender)
else {
- val m = sel.copy(elements = iter.toVector,
- wildcardFanOut = sel.wildcardFanOut || matchingChildren.size > 1)
+ val m = sel.copy(
+ elements = iter.toVector,
+ wildcardFanOut = sel.wildcardFanOut || matchingChildren.size > 1)
matchingChildren.foreach(c => deliverSelection(c.asInstanceOf[InternalActorRef], sender, m))
}
}
@@ -305,9 +308,10 @@ trait ScalaActorSelection {
* message is delivered by traversing the various actor paths involved.
*/
@SerialVersionUID(2L) // it has protobuf serialization in akka-remote
-private[akka] final case class ActorSelectionMessage(msg: Any,
- elements: immutable.Iterable[SelectionPathElement],
- wildcardFanOut: Boolean)
+private[akka] final case class ActorSelectionMessage(
+ msg: Any,
+ elements: immutable.Iterable[SelectionPathElement],
+ wildcardFanOut: Boolean)
extends AutoReceivedMessage
with PossiblyHarmful {
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
index 2d96f5abf2..37febd4956 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
@@ -45,9 +45,10 @@ object BootstrapSetup {
*
* @see [[BootstrapSetup]] for description of the properties
*/
- def apply(classLoader: Option[ClassLoader],
- config: Option[Config],
- defaultExecutionContext: Option[ExecutionContext]): BootstrapSetup =
+ def apply(
+ classLoader: Option[ClassLoader],
+ config: Option[Config],
+ defaultExecutionContext: Option[ExecutionContext]): BootstrapSetup =
new BootstrapSetup(classLoader, config, defaultExecutionContext)
/**
@@ -60,9 +61,10 @@ object BootstrapSetup {
*
* @see [[BootstrapSetup]] for description of the properties
*/
- def create(classLoader: Optional[ClassLoader],
- config: Optional[Config],
- defaultExecutionContext: Optional[ExecutionContext]): BootstrapSetup =
+ def create(
+ classLoader: Optional[ClassLoader],
+ config: Optional[Config],
+ defaultExecutionContext: Optional[ExecutionContext]): BootstrapSetup =
apply(classLoader.asScala, config.asScala, defaultExecutionContext.asScala)
/**
@@ -118,10 +120,11 @@ object ProviderSelection {
* @param actorRefProvider Overrides the `akka.actor.provider` setting in config, can be `local` (default), `remote` or
* `cluster`. It can also be a fully qualified class name of a provider.
*/
-final class BootstrapSetup private (val classLoader: Option[ClassLoader] = None,
- val config: Option[Config] = None,
- val defaultExecutionContext: Option[ExecutionContext] = None,
- val actorRefProvider: Option[ProviderSelection] = None)
+final class BootstrapSetup private (
+ val classLoader: Option[ClassLoader] = None,
+ val config: Option[Config] = None,
+ val defaultExecutionContext: Option[ExecutionContext] = None,
+ val actorRefProvider: Option[ProviderSelection] = None)
extends Setup {
def withClassloader(classLoader: ClassLoader): BootstrapSetup =
@@ -217,10 +220,11 @@ object ActorSystem {
*
* @see The Typesafe Config Library API Documentation
*/
- def create(name: String,
- config: Config,
- classLoader: ClassLoader,
- defaultExecutionContext: ExecutionContext): ActorSystem =
+ def create(
+ name: String,
+ config: Config,
+ classLoader: ClassLoader,
+ defaultExecutionContext: ExecutionContext): ActorSystem =
apply(name, Option(config), Option(classLoader), Option(defaultExecutionContext))
/**
@@ -290,10 +294,11 @@ object ActorSystem {
*
* @see The Typesafe Config Library API Documentation
*/
- def apply(name: String,
- config: Option[Config] = None,
- classLoader: Option[ClassLoader] = None,
- defaultExecutionContext: Option[ExecutionContext] = None): ActorSystem =
+ def apply(
+ name: String,
+ config: Option[Config] = None,
+ classLoader: Option[ClassLoader] = None,
+ defaultExecutionContext: Option[ExecutionContext] = None): ActorSystem =
apply(name, ActorSystemSetup(BootstrapSetup(classLoader, config, defaultExecutionContext)))
/**
@@ -678,12 +683,13 @@ abstract class ExtendedActorSystem extends ActorSystem {
* Internal API
*/
@InternalApi
-private[akka] class ActorSystemImpl(val name: String,
- applicationConfig: Config,
- classLoader: ClassLoader,
- defaultExecutionContext: Option[ExecutionContext],
- val guardianProps: Option[Props],
- setup: ActorSystemSetup)
+private[akka] class ActorSystemImpl(
+ val name: String,
+ applicationConfig: Config,
+ classLoader: ClassLoader,
+ defaultExecutionContext: Option[ExecutionContext],
+ val guardianProps: Option[Props],
+ setup: ActorSystemSetup)
extends ExtendedActorSystem {
if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-_]*$"""))
@@ -808,10 +814,11 @@ private[akka] class ActorSystemImpl(val name: String,
val scheduler: Scheduler = createScheduler()
val provider: ActorRefProvider = try {
- val arguments = Vector(classOf[String] -> name,
- classOf[Settings] -> settings,
- classOf[EventStream] -> eventStream,
- classOf[DynamicAccess] -> dynamicAccess)
+ val arguments = Vector(
+ classOf[String] -> name,
+ classOf[Settings] -> settings,
+ classOf[EventStream] -> eventStream,
+ classOf[DynamicAccess] -> dynamicAccess)
dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get
} catch {
@@ -824,14 +831,16 @@ private[akka] class ActorSystemImpl(val name: String,
val mailboxes: Mailboxes = new Mailboxes(settings, eventStream, dynamicAccess, deadLetters)
- val dispatchers: Dispatchers = new Dispatchers(settings,
- DefaultDispatcherPrerequisites(threadFactory,
- eventStream,
- scheduler,
- dynamicAccess,
- settings,
- mailboxes,
- defaultExecutionContext))
+ val dispatchers: Dispatchers = new Dispatchers(
+ settings,
+ DefaultDispatcherPrerequisites(
+ threadFactory,
+ eventStream,
+ scheduler,
+ dynamicAccess,
+ settings,
+ mailboxes,
+ defaultExecutionContext))
val dispatcher: ExecutionContextExecutor = dispatchers.defaultGlobalDispatcher
@@ -857,31 +866,32 @@ private[akka] class ActorSystemImpl(val name: String,
// Used for ManifestInfo.checkSameVersion
private def allModules: List[String] =
- List("akka-actor",
- "akka-actor-testkit-typed",
- "akka-actor-typed",
- "akka-agent",
- "akka-camel",
- "akka-cluster",
- "akka-cluster-metrics",
- "akka-cluster-sharding",
- "akka-cluster-sharding-typed",
- "akka-cluster-tools",
- "akka-cluster-typed",
- "akka-discovery",
- "akka-distributed-data",
- "akka-multi-node-testkit",
- "akka-osgi",
- "akka-persistence",
- "akka-persistence-query",
- "akka-persistence-shared",
- "akka-persistence-typed",
- "akka-protobuf",
- "akka-remote",
- "akka-slf4j",
- "akka-stream",
- "akka-stream-testkit",
- "akka-stream-typed")
+ List(
+ "akka-actor",
+ "akka-actor-testkit-typed",
+ "akka-actor-typed",
+ "akka-agent",
+ "akka-camel",
+ "akka-cluster",
+ "akka-cluster-metrics",
+ "akka-cluster-sharding",
+ "akka-cluster-sharding-typed",
+ "akka-cluster-tools",
+ "akka-cluster-typed",
+ "akka-discovery",
+ "akka-distributed-data",
+ "akka-multi-node-testkit",
+ "akka-osgi",
+ "akka-persistence",
+ "akka-persistence-query",
+ "akka-persistence-shared",
+ "akka-persistence-typed",
+ "akka-protobuf",
+ "akka-remote",
+ "akka-slf4j",
+ "akka-stream",
+ "akka-stream-testkit",
+ "akka-stream-typed")
@volatile private var _initialized = false
@@ -952,11 +962,12 @@ private[akka] class ActorSystemImpl(val name: String,
*/
protected def createScheduler(): Scheduler =
dynamicAccess
- .createInstanceFor[Scheduler](settings.SchedulerClass,
- immutable.Seq(classOf[Config] -> settings.config,
- classOf[LoggingAdapter] -> log,
- classOf[ThreadFactory] -> threadFactory.withName(
- threadFactory.name + "-scheduler")))
+ .createInstanceFor[Scheduler](
+ settings.SchedulerClass,
+ immutable.Seq(
+ classOf[Config] -> settings.config,
+ classOf[LoggingAdapter] -> log,
+ classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler")))
.get
//#create-scheduler
diff --git a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala
index 405a3a4369..8aa019ba8e 100644
--- a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala
+++ b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala
@@ -252,10 +252,11 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi
/**
* INTERNAL API
*/
- private[akka] final case class Phase(dependsOn: Set[String],
- timeout: FiniteDuration,
- recover: Boolean,
- enabled: Boolean)
+ private[akka] final case class Phase(
+ dependsOn: Set[String],
+ timeout: FiniteDuration,
+ recover: Boolean,
+ enabled: Boolean)
/**
* INTERNAL API
@@ -317,8 +318,9 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi
}
-final class CoordinatedShutdown private[akka] (system: ExtendedActorSystem,
- phases: Map[String, CoordinatedShutdown.Phase])
+final class CoordinatedShutdown private[akka] (
+ system: ExtendedActorSystem,
+ phases: Map[String, CoordinatedShutdown.Phase])
extends Extension {
import CoordinatedShutdown.Reason
import CoordinatedShutdown.UnknownReason
@@ -354,12 +356,14 @@ final class CoordinatedShutdown private[akka] (system: ExtendedActorSystem,
* and it will be performed.
*/
@tailrec def addTask(phase: String, taskName: String)(task: () => Future[Done]): Unit = {
- require(knownPhases(phase),
- s"Unknown phase [$phase], known phases [$knownPhases]. " +
- "All phases (along with their optional dependencies) must be defined in configuration")
- require(taskName.nonEmpty,
- "Set a task name when adding tasks to the Coordinated Shutdown. " +
- "Try to use unique, self-explanatory names.")
+ require(
+ knownPhases(phase),
+ s"Unknown phase [$phase], known phases [$knownPhases]. " +
+ "All phases (along with their optional dependencies) must be defined in configuration")
+ require(
+ taskName.nonEmpty,
+ "Set a task name when adding tasks to the Coordinated Shutdown. " +
+ "Try to use unique, self-explanatory names.")
val current = tasks.get(phase)
if (current == null) {
if (tasks.putIfAbsent(phase, Vector(taskName -> task)) != null)
@@ -449,10 +453,11 @@ final class CoordinatedShutdown private[akka] (system: ExtendedActorSystem,
Future.successful(Done)
case tasks =>
if (debugEnabled)
- log.debug("Performing phase [{}] with [{}] tasks: [{}]",
- phase,
- tasks.size,
- tasks.map { case (taskName, _) => taskName }.mkString(", "))
+ log.debug(
+ "Performing phase [{}] with [{}] tasks: [{}]",
+ phase,
+ tasks.size,
+ tasks.map { case (taskName, _) => taskName }.mkString(", "))
// note that tasks within same phase are performed in parallel
val recoverEnabled = phases(phase).recover
val result = Future
diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala
index ded4b94087..a5e7bcf0f0 100644
--- a/akka-actor/src/main/scala/akka/actor/Deployer.scala
+++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala
@@ -34,12 +34,13 @@ object Deploy {
* }}}
*/
@SerialVersionUID(2L)
-final case class Deploy(path: String = "",
- config: Config = ConfigFactory.empty,
- routerConfig: RouterConfig = NoRouter,
- scope: Scope = NoScopeGiven,
- dispatcher: String = Deploy.NoDispatcherGiven,
- mailbox: String = Deploy.NoMailboxGiven) {
+final case class Deploy(
+ path: String = "",
+ config: Config = ConfigFactory.empty,
+ routerConfig: RouterConfig = NoRouter,
+ scope: Scope = NoScopeGiven,
+ dispatcher: String = Deploy.NoDispatcherGiven,
+ mailbox: String = Deploy.NoMailboxGiven) {
/**
* Java API to create a Deploy with the given RouterConfig
@@ -62,12 +63,13 @@ final case class Deploy(path: String = "",
* other members are merged using `X.withFallback(other.X)`.
*/
def withFallback(other: Deploy): Deploy = {
- Deploy(path,
- config.withFallback(other.config),
- routerConfig.withFallback(other.routerConfig),
- scope.withFallback(other.scope),
- if (dispatcher == Deploy.NoDispatcherGiven) other.dispatcher else dispatcher,
- if (mailbox == Deploy.NoMailboxGiven) other.mailbox else mailbox)
+ Deploy(
+ path,
+ config.withFallback(other.config),
+ routerConfig.withFallback(other.routerConfig),
+ scope.withFallback(other.scope),
+ if (dispatcher == Deploy.NoDispatcherGiven) other.dispatcher else dispatcher,
+ if (mailbox == Deploy.NoMailboxGiven) other.mailbox else mailbox)
}
}
diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala
index 9afbc98c59..fcc32202f7 100644
--- a/akka-actor/src/main/scala/akka/actor/FSM.scala
+++ b/akka-actor/src/main/scala/akka/actor/FSM.scala
@@ -136,11 +136,12 @@ object FSM {
* INTERNAL API
* Using a subclass for binary compatibility reasons
*/
- private[akka] class SilentState[S, D](_stateName: S,
- _stateData: D,
- _timeout: Option[FiniteDuration],
- _stopReason: Option[Reason],
- _replies: List[Any])
+ private[akka] class SilentState[S, D](
+ _stateName: S,
+ _stateData: D,
+ _timeout: Option[FiniteDuration],
+ _stopReason: Option[Reason],
+ _replies: List[Any])
extends State[S, D](_stateName, _stateData, _timeout, _stopReason, _replies) {
/**
@@ -148,11 +149,12 @@ object FSM {
*/
private[akka] override def notifies: Boolean = false
- override def copy(stateName: S = stateName,
- stateData: D = stateData,
- timeout: Option[FiniteDuration] = timeout,
- stopReason: Option[Reason] = stopReason,
- replies: List[Any] = replies): State[S, D] = {
+ override def copy(
+ stateName: S = stateName,
+ stateData: D = stateData,
+ timeout: Option[FiniteDuration] = timeout,
+ stopReason: Option[Reason] = stopReason,
+ replies: List[Any] = replies): State[S, D] = {
new SilentState(stateName, stateData, timeout, stopReason, replies)
}
}
@@ -162,11 +164,12 @@ object FSM {
* name, the state data, possibly custom timeout, stop reason and replies
* accumulated while processing the last message.
*/
- case class State[S, D](stateName: S,
- stateData: D,
- timeout: Option[FiniteDuration] = None,
- stopReason: Option[Reason] = None,
- replies: List[Any] = Nil) {
+ case class State[S, D](
+ stateName: S,
+ stateData: D,
+ timeout: Option[FiniteDuration] = None,
+ stopReason: Option[Reason] = None,
+ replies: List[Any] = Nil) {
/**
* INTERNAL API
@@ -174,11 +177,12 @@ object FSM {
private[akka] def notifies: Boolean = true
// defined here to be able to override it in SilentState
- def copy(stateName: S = stateName,
- stateData: D = stateData,
- timeout: Option[FiniteDuration] = timeout,
- stopReason: Option[Reason] = stopReason,
- replies: List[Any] = replies): State[S, D] = {
+ def copy(
+ stateName: S = stateName,
+ stateData: D = stateData,
+ timeout: Option[FiniteDuration] = timeout,
+ stopReason: Option[Reason] = stopReason,
+ replies: List[Any] = replies): State[S, D] = {
new State(stateName, stateData, timeout, stopReason, replies)
}
diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
index 8e53ddf300..9a79ec98b3 100644
--- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
+++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
@@ -33,9 +33,10 @@ private[akka] case object ChildNameReserved extends ChildStats
* ChildRestartStats is the statistics kept by every parent Actor for every child Actor
* and is used for SupervisorStrategies to know how to deal with problems that occur for the children.
*/
-final case class ChildRestartStats(child: ActorRef,
- var maxNrOfRetriesCount: Int = 0,
- var restartTimeWindowStartNanos: Long = 0L)
+final case class ChildRestartStats(
+ child: ActorRef,
+ var maxNrOfRetriesCount: Int = 0,
+ var restartTimeWindowStartNanos: Long = 0L)
extends ChildStats {
def uid: Int = child.path.uid
@@ -282,12 +283,13 @@ abstract class SupervisorStrategy {
/**
* This method is called to act on the failure of a child: restart if the flag is true, stop otherwise.
*/
- def processFailure(context: ActorContext,
- restart: Boolean,
- child: ActorRef,
- cause: Throwable,
- stats: ChildRestartStats,
- children: Iterable[ChildRestartStats]): Unit
+ def processFailure(
+ context: ActorContext,
+ restart: Boolean,
+ child: ActorRef,
+ cause: Throwable,
+ stats: ChildRestartStats,
+ children: Iterable[ChildRestartStats]): Unit
/**
* This is the main entry point: in case of a child’s failure, this method
@@ -303,11 +305,12 @@ abstract class SupervisorStrategy {
*
* @param children is a lazy collection (a view)
*/
- def handleFailure(context: ActorContext,
- child: ActorRef,
- cause: Throwable,
- stats: ChildRestartStats,
- children: Iterable[ChildRestartStats]): Boolean = {
+ def handleFailure(
+ context: ActorContext,
+ child: ActorRef,
+ cause: Throwable,
+ stats: ChildRestartStats,
+ children: Iterable[ChildRestartStats]): Boolean = {
val directive = decider.applyOrElse(cause, escalateDefault)
directive match {
case Resume =>
@@ -401,9 +404,10 @@ abstract class SupervisorStrategy {
* [[scala.collection.immutable.Seq]] of Throwables which maps the given Throwables to restarts, otherwise escalates.
* @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled
*/
-case class AllForOneStrategy(maxNrOfRetries: Int = -1,
- withinTimeRange: Duration = Duration.Inf,
- override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
+case class AllForOneStrategy(
+ maxNrOfRetries: Int = -1,
+ withinTimeRange: Duration = Duration.Inf,
+ override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
extends SupervisorStrategy {
import SupervisorStrategy._
@@ -411,19 +415,21 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1,
/**
* Java API
*/
- def this(maxNrOfRetries: Int,
- withinTimeRange: Duration,
- decider: SupervisorStrategy.JDecider,
- loggingEnabled: Boolean) =
+ def this(
+ maxNrOfRetries: Int,
+ withinTimeRange: Duration,
+ decider: SupervisorStrategy.JDecider,
+ loggingEnabled: Boolean) =
this(maxNrOfRetries, withinTimeRange, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
/**
* Java API
*/
- def this(maxNrOfRetries: Int,
- withinTimeRange: java.time.Duration,
- decider: SupervisorStrategy.JDecider,
- loggingEnabled: Boolean) =
+ def this(
+ maxNrOfRetries: Int,
+ withinTimeRange: java.time.Duration,
+ decider: SupervisorStrategy.JDecider,
+ loggingEnabled: Boolean) =
this(maxNrOfRetries, withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
/**
@@ -484,12 +490,13 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1,
def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit = ()
- def processFailure(context: ActorContext,
- restart: Boolean,
- child: ActorRef,
- cause: Throwable,
- stats: ChildRestartStats,
- children: Iterable[ChildRestartStats]): Unit = {
+ def processFailure(
+ context: ActorContext,
+ restart: Boolean,
+ child: ActorRef,
+ cause: Throwable,
+ stats: ChildRestartStats,
+ children: Iterable[ChildRestartStats]): Unit = {
if (children.nonEmpty) {
if (restart && children.forall(_.requestRestartPermission(retriesWindow)))
children.foreach(crs => restartChild(crs.child, cause, suspendFirst = (crs.child != child)))
@@ -511,27 +518,30 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1,
* [[scala.collection.immutable.Seq]] of Throwables which maps the given Throwables to restarts, otherwise escalates.
* @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled
*/
-case class OneForOneStrategy(maxNrOfRetries: Int = -1,
- withinTimeRange: Duration = Duration.Inf,
- override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
+case class OneForOneStrategy(
+ maxNrOfRetries: Int = -1,
+ withinTimeRange: Duration = Duration.Inf,
+ override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
extends SupervisorStrategy {
/**
* Java API
*/
- def this(maxNrOfRetries: Int,
- withinTimeRange: Duration,
- decider: SupervisorStrategy.JDecider,
- loggingEnabled: Boolean) =
+ def this(
+ maxNrOfRetries: Int,
+ withinTimeRange: Duration,
+ decider: SupervisorStrategy.JDecider,
+ loggingEnabled: Boolean) =
this(maxNrOfRetries, withinTimeRange, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
/**
* Java API
*/
- def this(maxNrOfRetries: Int,
- withinTimeRange: java.time.Duration,
- decider: SupervisorStrategy.JDecider,
- loggingEnabled: Boolean) =
+ def this(
+ maxNrOfRetries: Int,
+ withinTimeRange: java.time.Duration,
+ decider: SupervisorStrategy.JDecider,
+ loggingEnabled: Boolean) =
this(maxNrOfRetries, withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider))
/**
@@ -586,17 +596,19 @@ case class OneForOneStrategy(maxNrOfRetries: Int = -1,
* every call to requestRestartPermission, assuming that strategies are shared
* across actors and thus this field does not take up much space
*/
- private val retriesWindow = (SupervisorStrategy.maxNrOfRetriesOption(maxNrOfRetries),
- SupervisorStrategy.withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt))
+ private val retriesWindow = (
+ SupervisorStrategy.maxNrOfRetriesOption(maxNrOfRetries),
+ SupervisorStrategy.withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt))
def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit = ()
- def processFailure(context: ActorContext,
- restart: Boolean,
- child: ActorRef,
- cause: Throwable,
- stats: ChildRestartStats,
- children: Iterable[ChildRestartStats]): Unit = {
+ def processFailure(
+ context: ActorContext,
+ restart: Boolean,
+ child: ActorRef,
+ cause: Throwable,
+ stats: ChildRestartStats,
+ children: Iterable[ChildRestartStats]): Unit = {
if (restart && stats.requestRestartPermission(retriesWindow))
restartChild(child, cause, suspendFirst = false)
else
diff --git a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala
index 955a0b355a..ed967b9f0f 100644
--- a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala
+++ b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala
@@ -48,8 +48,9 @@ class LightArrayRevolverScheduler(config: Config, log: LoggingAdapter, threadFac
val TickDuration =
config
.getMillisDuration("akka.scheduler.tick-duration")
- .requiring(_ >= 10.millis || !Helpers.isWindows,
- "minimum supported akka.scheduler.tick-duration on Windows is 10ms")
+ .requiring(
+ _ >= 10.millis || !Helpers.isWindows,
+ "minimum supported akka.scheduler.tick-duration on Windows is 10ms")
.requiring(_ >= 1.millis, "minimum supported akka.scheduler.tick-duration is 1ms")
val ShutdownTimeout = config.getMillisDuration("akka.scheduler.shutdown-timeout")
@@ -93,24 +94,23 @@ class LightArrayRevolverScheduler(config: Config, log: LoggingAdapter, threadFac
implicit executor: ExecutionContext): Cancellable = {
checkMaxDelay(roundUp(delay).toNanos)
try new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self =>
- compareAndSet(InitialRepeatMarker,
- schedule(executor,
- new AtomicLong(clock() + initialDelay.toNanos) with Runnable {
- override def run(): Unit = {
- try {
- runnable.run()
- val driftNanos = clock() - getAndAdd(delay.toNanos)
- if (self.get != null)
- swap(
- schedule(executor,
- this,
- Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1))))
- } catch {
- case _: SchedulerException => // ignore failure to enqueue or terminated target actor
- }
- }
- },
- roundUp(initialDelay)))
+ compareAndSet(
+ InitialRepeatMarker,
+ schedule(
+ executor,
+ new AtomicLong(clock() + initialDelay.toNanos) with Runnable {
+ override def run(): Unit = {
+ try {
+ runnable.run()
+ val driftNanos = clock() - getAndAdd(delay.toNanos)
+ if (self.get != null)
+ swap(schedule(executor, this, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1))))
+ } catch {
+ case _: SchedulerException => // ignore failure to enqueue or terminated target actor
+ }
+ }
+ },
+ roundUp(initialDelay)))
@tailrec private def swap(c: Cancellable): Unit = {
get match {
diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala
index 7621cf1b8b..016a582dc0 100644
--- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala
+++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala
@@ -25,12 +25,13 @@ import scala.util.control.NonFatal
* with a fully functional one, transfer all messages from dummy to real queue
* and swap out the cell ref.
*/
-private[akka] class RepointableActorRef(val system: ActorSystemImpl,
- val props: Props,
- val dispatcher: MessageDispatcher,
- val mailboxType: MailboxType,
- val supervisor: InternalActorRef,
- val path: ActorPath)
+private[akka] class RepointableActorRef(
+ val system: ActorSystemImpl,
+ val props: Props,
+ val dispatcher: MessageDispatcher,
+ val mailboxType: MailboxType,
+ val supervisor: InternalActorRef,
+ val path: ActorPath)
extends ActorRefWithCell
with RepointableRef {
@@ -180,10 +181,11 @@ private[akka] class RepointableActorRef(val system: ActorSystemImpl,
protected def writeReplace(): AnyRef = SerializedActorRef(this)
}
-private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl,
- val self: RepointableActorRef,
- val props: Props,
- val supervisor: InternalActorRef)
+private[akka] class UnstartedCell(
+ val systemImpl: ActorSystemImpl,
+ val self: RepointableActorRef,
+ val props: Props,
+ val supervisor: InternalActorRef)
extends Cell {
/*
@@ -251,17 +253,19 @@ private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl,
cell.sendMessage(msg)
} else if (!queue.offer(msg)) {
system.eventStream.publish(
- Warning(self.path.toString,
- getClass,
- "dropping message of type " + msg.message.getClass + " due to enqueue failure"))
+ Warning(
+ self.path.toString,
+ getClass,
+ "dropping message of type " + msg.message.getClass + " due to enqueue failure"))
system.deadLetters.tell(DeadLetter(msg.message, msg.sender, self), msg.sender)
} else if (Mailbox.debug) println(s"$self temp queueing ${msg.message} from ${msg.sender}")
} finally lock.unlock()
} else {
system.eventStream.publish(
- Warning(self.path.toString,
- getClass,
- "dropping message of type" + msg.message.getClass + " due to lock timeout"))
+ Warning(
+ self.path.toString,
+ getClass,
+ "dropping message of type" + msg.message.getClass + " due to lock timeout"))
system.deadLetters.tell(DeadLetter(msg.message, msg.sender, self), msg.sender)
}
}
diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
index d8071b338a..193a48dec7 100644
--- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala
+++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
@@ -51,15 +51,16 @@ trait Scheduler {
implicit
executor: ExecutionContext,
sender: ActorRef = Actor.noSender): Cancellable =
- schedule(initialDelay,
- interval,
- new Runnable {
- def run(): Unit = {
- receiver ! message
- if (receiver.isTerminated)
- throw SchedulerException("timer active for terminated actor")
- }
- })
+ schedule(
+ initialDelay,
+ interval,
+ new Runnable {
+ def run(): Unit = {
+ receiver ! message
+ if (receiver.isTerminated)
+ throw SchedulerException("timer active for terminated actor")
+ }
+ })
/**
* Schedules a message to be sent repeatedly with an initial delay and
@@ -69,12 +70,13 @@ trait Scheduler {
*
* Java API
*/
- final def schedule(initialDelay: java.time.Duration,
- interval: java.time.Duration,
- receiver: ActorRef,
- message: Any,
- executor: ExecutionContext,
- sender: ActorRef): Cancellable = {
+ final def schedule(
+ initialDelay: java.time.Duration,
+ interval: java.time.Duration,
+ receiver: ActorRef,
+ message: Any,
+ executor: ExecutionContext,
+ sender: ActorRef): Cancellable = {
import JavaDurationConverters._
schedule(initialDelay.asScala, interval.asScala, receiver, message)(executor, sender)
}
@@ -172,11 +174,12 @@ trait Scheduler {
*
* Java API
*/
- final def scheduleOnce(delay: java.time.Duration,
- receiver: ActorRef,
- message: Any,
- executor: ExecutionContext,
- sender: ActorRef): Cancellable = {
+ final def scheduleOnce(
+ delay: java.time.Duration,
+ receiver: ActorRef,
+ message: Any,
+ executor: ExecutionContext,
+ sender: ActorRef): Cancellable = {
import JavaDurationConverters._
scheduleOnce(delay.asScala, receiver, message)(executor, sender)
}
@@ -190,8 +193,9 @@ trait Scheduler {
*
* Scala API
*/
- final def scheduleOnce(delay: FiniteDuration)(f: => Unit)(implicit
- executor: ExecutionContext): Cancellable =
+ final def scheduleOnce(delay: FiniteDuration)(f: => Unit)(
+ implicit
+ executor: ExecutionContext): Cancellable =
scheduleOnce(delay, new Runnable { override def run(): Unit = f })
/**
diff --git a/akka-actor/src/main/scala/akka/actor/Stash.scala b/akka-actor/src/main/scala/akka/actor/Stash.scala
index 66a13400cd..ee49e43845 100644
--- a/akka-actor/src/main/scala/akka/actor/Stash.scala
+++ b/akka-actor/src/main/scala/akka/actor/Stash.scala
@@ -149,9 +149,10 @@ private[akka] trait StashSupport {
actorCell.mailbox.messageQueue match {
case queue: DequeBasedMessageQueueSemantics => queue
case other =>
- throw ActorInitializationException(self,
- s"DequeBasedMailbox required, got: ${other.getClass.getName}\n" +
- """An (unbounded) deque-based mailbox can be configured as follows:
+ throw ActorInitializationException(
+ self,
+ s"DequeBasedMailbox required, got: ${other.getClass.getName}\n" +
+ """An (unbounded) deque-based mailbox can be configured as follows:
| my-custom-mailbox {
| mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox"
| }
diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
index d2910b5cad..fe021dc9e0 100644
--- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala
+++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
@@ -170,10 +170,11 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
*
* Represents the serialized form of a MethodCall, uses readResolve and writeReplace to marshall the call
*/
- private[akka] final case class SerializedMethodCall(ownerType: Class[_],
- methodName: String,
- parameterTypes: Array[Class[_]],
- serializedParameters: Array[(Int, String, Array[Byte])]) {
+ private[akka] final case class SerializedMethodCall(
+ ownerType: Class[_],
+ methodName: String,
+ parameterTypes: Array[Class[_]],
+ serializedParameters: Array[(Int, String, Array[Byte])]) {
//TODO implement writeObject and readObject to serialize
//TODO Possible optimization is to special encode the parameter-types to conserve space
@@ -184,20 +185,20 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
"Trying to deserialize a SerializedMethodCall without an ActorSystem in scope." +
" Use akka.serialization.JavaSerializer.currentSystem.withValue(system) { ... }")
val serialization = SerializationExtension(system)
- MethodCall(ownerType.getDeclaredMethod(methodName, parameterTypes: _*),
- serializedParameters match {
- case null => null
- case a if a.length == 0 => Array[AnyRef]()
- case a =>
- val deserializedParameters
- : Array[AnyRef] = new Array[AnyRef](a.length) //Mutable for the sake of sanity
- for (i <- 0 until a.length) {
- val (sId, manifest, bytes) = a(i)
- deserializedParameters(i) = serialization.deserialize(bytes, sId, manifest).get
- }
+ MethodCall(
+ ownerType.getDeclaredMethod(methodName, parameterTypes: _*),
+ serializedParameters match {
+ case null => null
+ case a if a.length == 0 => Array[AnyRef]()
+ case a =>
+ val deserializedParameters: Array[AnyRef] = new Array[AnyRef](a.length) //Mutable for the sake of sanity
+ for (i <- 0 until a.length) {
+ val (sId, manifest, bytes) = a(i)
+ deserializedParameters(i) = serialization.deserialize(bytes, sId, manifest).get
+ }
- deserializedParameters
- })
+ deserializedParameters
+ })
}
}
@@ -251,9 +252,10 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
*
* Implementation of TypedActor as an Actor
*/
- private[akka] class TypedActor[R <: AnyRef, T <: R](val proxyVar: AtomVar[R],
- createInstance: => T,
- interfaces: immutable.Seq[Class[_]])
+ private[akka] class TypedActor[R <: AnyRef, T <: R](
+ val proxyVar: AtomVar[R],
+ createInstance: => T,
+ interfaces: immutable.Seq[Class[_]])
extends Actor {
// if we were remote deployed we need to create a local proxy
if (!context.parent.asInstanceOf[InternalActorRef].isLocal)
@@ -425,9 +427,10 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
/**
* INTERNAL API
*/
- private[akka] class TypedActorInvocationHandler(@transient val extension: TypedActorExtension,
- @transient val actorVar: AtomVar[ActorRef],
- @transient val timeout: Timeout)
+ private[akka] class TypedActorInvocationHandler(
+ @transient val extension: TypedActorExtension,
+ @transient val actorVar: AtomVar[ActorRef],
+ @transient val timeout: Timeout)
extends InvocationHandler
with Serializable {
@@ -474,8 +477,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
/**
* INTERNAL API
*/
- private[akka] final case class SerializedTypedActorInvocationHandler(val actor: ActorRef,
- val timeout: FiniteDuration) {
+ private[akka] final case class SerializedTypedActorInvocationHandler(
+ val actor: ActorRef,
+ val timeout: FiniteDuration) {
@throws(classOf[ObjectStreamException]) private def readResolve(): AnyRef =
JavaSerializer.currentSystem.value match {
case null =>
@@ -696,9 +700,10 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac
/**
* INTERNAL API
*/
- private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T],
- proxyVar: AtomVar[R],
- actorRef: => ActorRef): R = {
+ private[akka] def createActorRefProxy[R <: AnyRef, T <: R](
+ props: TypedProps[T],
+ proxyVar: AtomVar[R],
+ actorRef: => ActorRef): R = {
//Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling
val actorVar = new AtomVar[ActorRef](null)
val proxy = Proxy
diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala
index c45a64b895..53a6fafdeb 100644
--- a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala
+++ b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala
@@ -248,11 +248,12 @@ private[akka] trait Children { this: ActorCell =>
}
}
- private def makeChild(cell: ActorCell,
- props: Props,
- name: String,
- async: Boolean,
- systemService: Boolean): ActorRef = {
+ private def makeChild(
+ cell: ActorCell,
+ props: Props,
+ name: String,
+ async: Boolean,
+ systemService: Boolean): ActorRef = {
if (cell.system.settings.SerializeAllCreators && !systemService && props.deploy.scope != LocalScope) {
val oldInfo = Serialization.currentTransportInformation.value
try {
@@ -288,14 +289,15 @@ private[akka] trait Children { this: ActorCell =>
val actor =
try {
val childPath = new ChildActorPath(cell.self.path, name, ActorCell.newUid())
- cell.provider.actorOf(cell.systemImpl,
- props,
- cell.self,
- childPath,
- systemService = systemService,
- deploy = None,
- lookupDeploy = true,
- async = async)
+ cell.provider.actorOf(
+ cell.systemImpl,
+ props,
+ cell.self,
+ childPath,
+ systemService = systemService,
+ deploy = None,
+ lookupDeploy = true,
+ async = async)
} catch {
case e: InterruptedException =>
unreserveChild(name)
diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala
index c04652af58..1f0f97a61e 100644
--- a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala
+++ b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala
@@ -160,9 +160,10 @@ private[akka] object ChildrenContainer {
* type of container, depending on whether or not children are left and whether or not
* the reason was “Terminating”.
*/
- final case class TerminatingChildrenContainer(c: immutable.TreeMap[String, ChildStats],
- toDie: Set[ActorRef],
- reason: SuspendReason)
+ final case class TerminatingChildrenContainer(
+ c: immutable.TreeMap[String, ChildStats],
+ toDie: Set[ActorRef],
+ reason: SuspendReason)
extends ChildrenContainer {
override def add(name: String, stats: ChildRestartStats): ChildrenContainer = copy(c.updated(name, stats))
diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala
index b9c5c22353..f2fdc6f464 100644
--- a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala
+++ b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala
@@ -70,9 +70,10 @@ private[akka] trait DeathWatch { this: ActorCell =>
* When this actor is watching the subject of [[akka.actor.Terminated]] message
* it will be propagated to user's receive.
*/
- protected def watchedActorTerminated(actor: ActorRef,
- existenceConfirmed: Boolean,
- addressTerminated: Boolean): Unit = {
+ protected def watchedActorTerminated(
+ actor: ActorRef,
+ existenceConfirmed: Boolean,
+ addressTerminated: Boolean): Unit = {
watchingGet(actor) match {
case None => // We're apparently no longer watching this actor.
case Some(optionalMessage) =>
diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala
index db28ae646c..a0db8db0bc 100644
--- a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala
+++ b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala
@@ -194,10 +194,11 @@ private[akka] trait FaultHandling { this: ActorCell =>
}
} catch handleNonFatalOrInterruptedException { e =>
publish(
- Error(e,
- self.path.toString,
- clazz(actor),
- "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t)))
+ Error(
+ e,
+ self.path.toString,
+ clazz(actor),
+ "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t)))
try children.foreach(stop)
finally finishTerminate()
}
diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala
index 5f3d965171..b3076f2954 100644
--- a/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala
+++ b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala
@@ -116,10 +116,11 @@ import akka.util.OptionVal
OptionVal.Some(t.msg.asInstanceOf[AnyRef])
} else {
// it was from an old timer that was enqueued in mailbox before canceled
- log.debug("Received timer [{}] from from old generation [{}], expected generation [{}], discarding",
- timerMsg.key,
- timerMsg.generation,
- t.generation)
+ log.debug(
+ "Received timer [{}] from old generation [{}], expected generation [{}], discarding",
+ timerMsg.key,
+ timerMsg.generation,
+ t.generation)
OptionVal.None // message should be ignored
}
}
diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
index 44bdea874b..654d168cee 100644
--- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
@@ -282,9 +282,10 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator
*
* INTERNAL API
*/
- protected[akka] def registerForExecution(mbox: Mailbox,
- hasMessageHint: Boolean,
- hasSystemMessageHint: Boolean): Boolean
+ protected[akka] def registerForExecution(
+ mbox: Mailbox,
+ hasMessageHint: Boolean,
+ hasSystemMessageHint: Boolean): Boolean
// TODO check whether this should not actually be a property of the mailbox
/**
@@ -362,9 +363,10 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites:
config.getString("executor") match {
case "default-executor" =>
- new DefaultExecutorServiceConfigurator(config.getConfig("default-executor"),
- prerequisites,
- configurator(config.getString("default-executor.fallback")))
+ new DefaultExecutorServiceConfigurator(
+ config.getConfig("default-executor"),
+ prerequisites,
+ configurator(config.getString("default-executor.fallback")))
case other => configurator(other)
}
}
@@ -400,12 +402,14 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr
if (config.getString("fixed-pool-size") == "off")
builder
- .setCorePoolSizeFromFactor(config.getInt("core-pool-size-min"),
- config.getDouble("core-pool-size-factor"),
- config.getInt("core-pool-size-max"))
- .setMaxPoolSizeFromFactor(config.getInt("max-pool-size-min"),
- config.getDouble("max-pool-size-factor"),
- config.getInt("max-pool-size-max"))
+ .setCorePoolSizeFromFactor(
+ config.getInt("core-pool-size-min"),
+ config.getDouble("core-pool-size-factor"),
+ config.getInt("core-pool-size-max"))
+ .setMaxPoolSizeFromFactor(
+ config.getInt("max-pool-size-min"),
+ config.getDouble("max-pool-size-factor"),
+ config.getInt("max-pool-size-max"))
else
builder.setFixedPoolSize(config.getInt("fixed-pool-size"))
}
@@ -414,17 +418,19 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr
threadPoolConfig.createExecutorServiceFactory(id, threadFactory)
}
-class DefaultExecutorServiceConfigurator(config: Config,
- prerequisites: DispatcherPrerequisites,
- fallback: ExecutorServiceConfigurator)
+class DefaultExecutorServiceConfigurator(
+ config: Config,
+ prerequisites: DispatcherPrerequisites,
+ fallback: ExecutorServiceConfigurator)
extends ExecutorServiceConfigurator(config, prerequisites) {
val provider: ExecutorServiceFactoryProvider =
prerequisites.defaultExecutionContext match {
case Some(ec) =>
prerequisites.eventStream.publish(
- Debug("DefaultExecutorServiceConfigurator",
- this.getClass,
- s"Using passed in ExecutionContext as default executor for this ActorSystem. If you want to use a different executor, please specify one in akka.actor.default-dispatcher.default-executor."))
+ Debug(
+ "DefaultExecutorServiceConfigurator",
+ this.getClass,
+ s"Using passed in ExecutionContext as default executor for this ActorSystem. If you want to use a different executor, please specify one in akka.actor.default-dispatcher.default-executor."))
new AbstractExecutorService with ExecutorServiceFactory with ExecutorServiceFactoryProvider {
def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = this
diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala
index 730b6e228c..bda62ede90 100644
--- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala
@@ -31,20 +31,22 @@ import scala.concurrent.duration.FiniteDuration
* @see akka.dispatch.Dispatchers
*/
@deprecated("Use BalancingPool instead of BalancingDispatcher", "2.3")
-private[akka] class BalancingDispatcher(_configurator: MessageDispatcherConfigurator,
- _id: String,
- throughput: Int,
- throughputDeadlineTime: Duration,
- _mailboxType: MailboxType,
- _executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
- _shutdownTimeout: FiniteDuration,
- attemptTeamWork: Boolean)
- extends Dispatcher(_configurator,
- _id,
- throughput,
- throughputDeadlineTime,
- _executorServiceFactoryProvider,
- _shutdownTimeout) {
+private[akka] class BalancingDispatcher(
+ _configurator: MessageDispatcherConfigurator,
+ _id: String,
+ throughput: Int,
+ throughputDeadlineTime: Duration,
+ _mailboxType: MailboxType,
+ _executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
+ _shutdownTimeout: FiniteDuration,
+ attemptTeamWork: Boolean)
+ extends Dispatcher(
+ _configurator,
+ _id,
+ throughput,
+ throughputDeadlineTime,
+ _executorServiceFactoryProvider,
+ _shutdownTimeout) {
/**
* INTERNAL API
diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
index 9e3ded6a17..98e9a2f7b2 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
@@ -25,12 +25,13 @@ import java.util.concurrent.atomic.AtomicReferenceFieldUpdater
* always continues until the mailbox is empty.
* Larger values (or zero or negative) increase throughput, smaller values increase fairness
*/
-class Dispatcher(_configurator: MessageDispatcherConfigurator,
- val id: String,
- val throughput: Int,
- val throughputDeadlineTime: Duration,
- executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
- val shutdownTimeout: FiniteDuration)
+class Dispatcher(
+ _configurator: MessageDispatcherConfigurator,
+ val id: String,
+ val throughput: Int,
+ val throughputDeadlineTime: Duration,
+ executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
+ val shutdownTimeout: FiniteDuration)
extends MessageDispatcher(_configurator) {
import configurator.prerequisites._
@@ -88,9 +89,10 @@ class Dispatcher(_configurator: MessageDispatcherConfigurator,
new Mailbox(mailboxType.create(Some(actor.self), Some(actor.system))) with DefaultSystemMessageQueue
}
- private val esUpdater = AtomicReferenceFieldUpdater.newUpdater(classOf[Dispatcher],
- classOf[LazyExecutorServiceDelegate],
- "executorServiceDelegate")
+ private val esUpdater = AtomicReferenceFieldUpdater.newUpdater(
+ classOf[Dispatcher],
+ classOf[LazyExecutorServiceDelegate],
+ "executorServiceDelegate")
/**
* INTERNAL API
@@ -106,9 +108,10 @@ class Dispatcher(_configurator: MessageDispatcherConfigurator,
*
* INTERNAL API
*/
- protected[akka] override def registerForExecution(mbox: Mailbox,
- hasMessageHint: Boolean,
- hasSystemMessageHint: Boolean): Boolean = {
+ protected[akka] override def registerForExecution(
+ mbox: Mailbox,
+ hasMessageHint: Boolean,
+ hasSystemMessageHint: Boolean): Boolean = {
if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races
if (mbox.setAsScheduled()) {
try {
diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
index 35a0c17b00..b240e2c68d 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
@@ -29,13 +29,14 @@ trait DispatcherPrerequisites {
/**
* INTERNAL API
*/
-private[akka] final case class DefaultDispatcherPrerequisites(val threadFactory: ThreadFactory,
- val eventStream: EventStream,
- val scheduler: Scheduler,
- val dynamicAccess: DynamicAccess,
- val settings: ActorSystem.Settings,
- val mailboxes: Mailboxes,
- val defaultExecutionContext: Option[ExecutionContext])
+private[akka] final case class DefaultDispatcherPrerequisites(
+ val threadFactory: ThreadFactory,
+ val eventStream: EventStream,
+ val scheduler: Scheduler,
+ val dynamicAccess: DynamicAccess,
+ val settings: ActorSystem.Settings,
+ val mailboxes: Mailboxes,
+ val defaultExecutionContext: Option[ExecutionContext])
extends DispatcherPrerequisites
object Dispatchers {
@@ -207,12 +208,13 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites)
extends MessageDispatcherConfigurator(config, prerequisites) {
- private val instance = new Dispatcher(this,
- config.getString("id"),
- config.getInt("throughput"),
- config.getNanosDuration("throughput-deadline-time"),
- configureExecutor(),
- config.getMillisDuration("shutdown-timeout"))
+ private val instance = new Dispatcher(
+ this,
+ config.getString("id"),
+ config.getInt("throughput"),
+ config.getNanosDuration("throughput-deadline-time"),
+ configureExecutor(),
+ config.getMillisDuration("shutdown-timeout"))
/**
* Returns the same dispatcher instance for each invocation
@@ -265,14 +267,15 @@ class BalancingDispatcherConfigurator(_config: Config, _prerequisites: Dispatche
}
protected def create(mailboxType: MailboxType): BalancingDispatcher =
- new BalancingDispatcher(this,
- config.getString("id"),
- config.getInt("throughput"),
- config.getNanosDuration("throughput-deadline-time"),
- mailboxType,
- configureExecutor(),
- config.getMillisDuration("shutdown-timeout"),
- config.getBoolean("attempt-teamwork"))
+ new BalancingDispatcher(
+ this,
+ config.getString("id"),
+ config.getInt("throughput"),
+ config.getNanosDuration("throughput-deadline-time"),
+ mailboxType,
+ configureExecutor(),
+ config.getMillisDuration("shutdown-timeout"),
+ config.getBoolean("attempt-teamwork"))
/**
* Returns the same dispatcher instance for each invocation
@@ -292,10 +295,11 @@ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrer
case e: ThreadPoolExecutorConfigurator => e.threadPoolConfig
case _ =>
prerequisites.eventStream.publish(
- Warning("PinnedDispatcherConfigurator",
- this.getClass,
- "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config."
- .format(config.getString("id"))))
+ Warning(
+ "PinnedDispatcherConfigurator",
+ this.getClass,
+ "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format(
+ config.getString("id"))))
ThreadPoolConfig()
}
@@ -303,10 +307,11 @@ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrer
* Creates new dispatcher for each invocation.
*/
override def dispatcher(): MessageDispatcher =
- new PinnedDispatcher(this,
- null,
- config.getString("id"),
- config.getMillisDuration("shutdown-timeout"),
- threadPoolConfig)
+ new PinnedDispatcher(
+ this,
+ null,
+ config.getString("id"),
+ config.getMillisDuration("shutdown-timeout"),
+ threadPoolConfig)
}
diff --git a/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala b/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala
index 94a4fa4d23..aefce7c74f 100644
--- a/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala
@@ -14,15 +14,17 @@ object ForkJoinExecutorConfigurator {
/**
* INTERNAL AKKA USAGE ONLY
*/
- final class AkkaForkJoinPool(parallelism: Int,
- threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
- unhandledExceptionHandler: Thread.UncaughtExceptionHandler,
- asyncMode: Boolean)
+ final class AkkaForkJoinPool(
+ parallelism: Int,
+ threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
+ unhandledExceptionHandler: Thread.UncaughtExceptionHandler,
+ asyncMode: Boolean)
extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, asyncMode)
with LoadMetrics {
- def this(parallelism: Int,
- threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
- unhandledExceptionHandler: Thread.UncaughtExceptionHandler) =
+ def this(
+ parallelism: Int,
+ threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
+ unhandledExceptionHandler: Thread.UncaughtExceptionHandler) =
this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true)
override def execute(r: Runnable): Unit =
@@ -71,9 +73,10 @@ class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrer
"The prerequisites for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!")
}
- class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
- val parallelism: Int,
- val asyncMode: Boolean)
+ class ForkJoinExecutorServiceFactory(
+ val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
+ val parallelism: Int,
+ val asyncMode: Boolean)
extends ExecutorServiceFactory {
def this(threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, parallelism: Int) =
this(threadFactory, parallelism, asyncMode = true)
@@ -98,10 +101,12 @@ class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrer
""""task-peeking-mode" in "fork-join-executor" section could only set to "FIFO" or "LIFO".""")
}
- new ForkJoinExecutorServiceFactory(validate(tf),
- ThreadPoolConfig.scaledPoolSize(config.getInt("parallelism-min"),
- config.getDouble("parallelism-factor"),
- config.getInt("parallelism-max")),
- asyncMode)
+ new ForkJoinExecutorServiceFactory(
+ validate(tf),
+ ThreadPoolConfig.scaledPoolSize(
+ config.getInt("parallelism-min"),
+ config.getDouble("parallelism-factor"),
+ config.getInt("parallelism-max")),
+ asyncMode)
}
}
diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala
index 32c7b694ae..1aa25f19a3 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Future.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala
@@ -63,8 +63,9 @@ object ExecutionContexts {
* @param errorReporter a Procedure that will log any exceptions passed to it
* @return a new ExecutionContext
*/
- def fromExecutorService(executorService: ExecutorService,
- errorReporter: Procedure[Throwable]): ExecutionContextExecutorService =
+ def fromExecutorService(
+ executorService: ExecutorService,
+ errorReporter: Procedure[Throwable]): ExecutionContextExecutorService =
ExecutionContext.fromExecutorService(executorService, errorReporter.apply)
/**
@@ -133,9 +134,10 @@ object Futures {
/**
* Returns a Future that will hold the optional result of the first Future with a result that matches the predicate
*/
- def find[T <: AnyRef](futures: JIterable[Future[T]],
- predicate: JFunc[T, java.lang.Boolean],
- executor: ExecutionContext): Future[JOption[T]] = {
+ def find[T <: AnyRef](
+ futures: JIterable[Future[T]],
+ predicate: JFunc[T, java.lang.Boolean],
+ executor: ExecutionContext): Future[JOption[T]] = {
implicit val ec = executor
compat.Future.find[T](futures.asScala)(predicate.apply(_))(executor).map(JOption.fromScalaOption)
}
@@ -152,18 +154,20 @@ object Futures {
* the result will be the first failure of any of the futures, or any failure in the actual fold,
* or the result of the fold.
*/
- def fold[T <: AnyRef, R <: AnyRef](zero: R,
- futures: JIterable[Future[T]],
- fun: akka.japi.Function2[R, T, R],
- executor: ExecutionContext): Future[R] =
+ def fold[T <: AnyRef, R <: AnyRef](
+ zero: R,
+ futures: JIterable[Future[T]],
+ fun: akka.japi.Function2[R, T, R],
+ executor: ExecutionContext): Future[R] =
compat.Future.fold(futures.asScala)(zero)(fun.apply)(executor)
/**
* Reduces the results of the supplied futures and binary function.
*/
- def reduce[T <: AnyRef, R >: T](futures: JIterable[Future[T]],
- fun: akka.japi.Function2[R, T, R],
- executor: ExecutionContext): Future[R] =
+ def reduce[T <: AnyRef, R >: T](
+ futures: JIterable[Future[T]],
+ fun: akka.japi.Function2[R, T, R],
+ executor: ExecutionContext): Future[R] =
compat.Future.reduce[T, R](futures.asScala)(fun.apply)(executor)
/**
diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala
index 8c837d2e84..d965e82e76 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala
@@ -255,11 +255,12 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue)
/**
* Process the messages in the mailbox
*/
- @tailrec private final def processMailbox(left: Int = java.lang.Math.max(dispatcher.throughput, 1),
- deadlineNs: Long =
- if (dispatcher.isThroughputDeadlineTimeDefined == true)
- System.nanoTime + dispatcher.throughputDeadlineTime.toNanos
- else 0L): Unit =
+ @tailrec private final def processMailbox(
+ left: Int = java.lang.Math.max(dispatcher.throughput, 1),
+ deadlineNs: Long =
+ if (dispatcher.isThroughputDeadlineTimeDefined == true)
+ System.nanoTime + dispatcher.throughputDeadlineTime.toNanos
+ else 0L): Unit =
if (shouldProcessMessage) {
val next = dequeue()
if (next ne null) {
@@ -312,10 +313,11 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue)
case e: InterruptedException => interruption = e
case NonFatal(e) =>
actor.system.eventStream.publish(
- Error(e,
- actor.self.path.toString,
- this.getClass,
- "error while enqueuing " + msg + " to deadLetters: " + e.getMessage))
+ Error(
+ e,
+ actor.self.path.toString,
+ this.getClass,
+ "error while enqueuing " + msg + " to deadLetters: " + e.getMessage))
}
}
// if we got an interrupted exception while handling system messages, then rethrow it
@@ -738,9 +740,10 @@ object UnboundedPriorityMailbox {
* BoundedPriorityMailbox is a bounded mailbox that allows for prioritization of its contents.
* Extend this class and provide the Comparator in the constructor.
*/
-class BoundedPriorityMailbox(final val cmp: Comparator[Envelope],
- final val capacity: Int,
- override final val pushTimeOut: Duration)
+class BoundedPriorityMailbox(
+ final val cmp: Comparator[Envelope],
+ final val capacity: Int,
+ override final val pushTimeOut: Duration)
extends MailboxType
with ProducesMessageQueue[BoundedPriorityMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
@@ -786,9 +789,10 @@ object UnboundedStablePriorityMailbox {
* [[BoundedPriorityMailbox]] it preserves ordering for messages of equal priority.
* Extend this class and provide the Comparator in the constructor.
*/
-class BoundedStablePriorityMailbox(final val cmp: Comparator[Envelope],
- final val capacity: Int,
- override final val pushTimeOut: Duration)
+class BoundedStablePriorityMailbox(
+ final val cmp: Comparator[Envelope],
+ final val capacity: Int,
+ override final val pushTimeOut: Duration)
extends MailboxType
with ProducesMessageQueue[BoundedStablePriorityMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala
index aba568e8df..9e066dcc8d 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala
@@ -27,10 +27,11 @@ object Mailboxes {
final val NoMailboxRequirement = ""
}
-private[akka] class Mailboxes(val settings: ActorSystem.Settings,
- val eventStream: EventStream,
- dynamicAccess: DynamicAccess,
- deadLetters: ActorRef) {
+private[akka] class Mailboxes(
+ val settings: ActorSystem.Settings,
+ val eventStream: EventStream,
+ dynamicAccess: DynamicAccess,
+ deadLetters: ActorRef) {
import Mailboxes._
@@ -70,9 +71,10 @@ private[akka] class Mailboxes(val settings: ActorSystem.Settings,
}
.recover {
case e =>
- throw new ConfigurationException(s"Type [${k}] specified as akka.actor.mailbox.requirement " +
- s"[${v}] in config can't be loaded due to [${e.getMessage}]",
- e)
+ throw new ConfigurationException(
+ s"Type [${k}] specified as akka.actor.mailbox.requirement " +
+ s"[${v}] in config can't be loaded due to [${e.getMessage}]",
+ e)
}
.get
}
@@ -147,9 +149,10 @@ private[akka] class Mailboxes(val settings: ActorSystem.Settings,
// TODO remove in 2.3
if (!hasMailboxType && !mailboxSizeWarningIssued && dispatcherConfig.hasPath("mailbox-size")) {
eventStream.publish(
- Warning("mailboxes",
- getClass,
- s"ignoring setting 'mailbox-size' for dispatcher [$id], you need to specify 'mailbox-type=bounded'"))
+ Warning(
+ "mailboxes",
+ getClass,
+ s"ignoring setting 'mailbox-size' for dispatcher [$id], you need to specify 'mailbox-type=bounded'"))
mailboxSizeWarningIssued = true
}
diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala
index 4f5c29df72..84df16bd3d 100644
--- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala
@@ -14,17 +14,19 @@ import scala.concurrent.duration.FiniteDuration
* The preferred way of creating dispatchers is to define configuration of it and use the
* the `lookup` method in [[akka.dispatch.Dispatchers]].
*/
-class PinnedDispatcher(_configurator: MessageDispatcherConfigurator,
- _actor: ActorCell,
- _id: String,
- _shutdownTimeout: FiniteDuration,
- _threadPoolConfig: ThreadPoolConfig)
- extends Dispatcher(_configurator,
- _id,
- Int.MaxValue,
- Duration.Zero,
- _threadPoolConfig.copy(corePoolSize = 1, maxPoolSize = 1),
- _shutdownTimeout) {
+class PinnedDispatcher(
+ _configurator: MessageDispatcherConfigurator,
+ _actor: ActorCell,
+ _id: String,
+ _shutdownTimeout: FiniteDuration,
+ _threadPoolConfig: ThreadPoolConfig)
+ extends Dispatcher(
+ _configurator,
+ _id,
+ Int.MaxValue,
+ Duration.Zero,
+ _threadPoolConfig.copy(corePoolSize = 1, maxPoolSize = 1),
+ _shutdownTimeout) {
@volatile
private var owner: ActorCell = _actor
diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
index 752f89b70d..17fcaf5126 100644
--- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
@@ -66,22 +66,24 @@ trait ExecutorServiceFactoryProvider {
/**
* A small configuration DSL to create ThreadPoolExecutors that can be provided as an ExecutorServiceFactoryProvider to Dispatcher
*/
-final case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout,
- corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize,
- maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize,
- threadTimeout: Duration = ThreadPoolConfig.defaultTimeout,
- queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(),
- rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy)
+final case class ThreadPoolConfig(
+ allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout,
+ corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize,
+ maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize,
+ threadTimeout: Duration = ThreadPoolConfig.defaultTimeout,
+ queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(),
+ rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy)
extends ExecutorServiceFactoryProvider {
class ThreadPoolExecutorServiceFactory(val threadFactory: ThreadFactory) extends ExecutorServiceFactory {
def createExecutorService: ExecutorService = {
- val service: ThreadPoolExecutor = new ThreadPoolExecutor(corePoolSize,
- maxPoolSize,
- threadTimeout.length,
- threadTimeout.unit,
- queueFactory(),
- threadFactory,
- rejectionPolicy) with LoadMetrics {
+ val service: ThreadPoolExecutor = new ThreadPoolExecutor(
+ corePoolSize,
+ maxPoolSize,
+ threadTimeout.length,
+ threadTimeout.unit,
+ queueFactory(),
+ threadFactory,
+ rejectionPolicy) with LoadMetrics {
def atFullThrottle(): Boolean = this.getActiveCount >= this.getPoolSize
}
service.allowCoreThreadTimeOut(allowCorePoolTimeout)
@@ -120,8 +122,9 @@ final case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) {
def withNewThreadPoolWithSynchronousQueueWithFairness(fair: Boolean): ThreadPoolConfigBuilder =
this.copy(config = config.copy(queueFactory = synchronousQueue(fair)))
- def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(capacity: Int,
- fair: Boolean): ThreadPoolConfigBuilder =
+ def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(
+ capacity: Int,
+ fair: Boolean): ThreadPoolConfigBuilder =
this.copy(config = config.copy(queueFactory = arrayBlockingQueue(capacity, fair)))
def setFixedPoolSize(size: Int): ThreadPoolConfigBuilder =
@@ -176,12 +179,12 @@ object MonitorableThreadFactory {
}
}
-final case class MonitorableThreadFactory(name: String,
- daemonic: Boolean,
- contextClassLoader: Option[ClassLoader],
- exceptionHandler: Thread.UncaughtExceptionHandler =
- MonitorableThreadFactory.doNothing,
- protected val counter: AtomicLong = new AtomicLong)
+final case class MonitorableThreadFactory(
+ name: String,
+ daemonic: Boolean,
+ contextClassLoader: Option[ClassLoader],
+ exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing,
+ protected val counter: AtomicLong = new AtomicLong)
extends ThreadFactory
with ForkJoinPool.ForkJoinWorkerThreadFactory {
diff --git a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala
index 434a8c592b..3cdc3fc81a 100644
--- a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala
@@ -123,13 +123,14 @@ private[affinity] object AffinityPool {
*/
@InternalApi
@ApiMayChange
-private[akka] class AffinityPool(id: String,
- parallelism: Int,
- affinityGroupSize: Int,
- threadFactory: ThreadFactory,
- idleCpuLevel: Int,
- final val queueSelector: QueueSelector,
- rejectionHandler: RejectionHandler)
+private[akka] class AffinityPool(
+ id: String,
+ parallelism: Int,
+ affinityGroupSize: Int,
+ threadFactory: ThreadFactory,
+ idleCpuLevel: Int,
+ final val queueSelector: QueueSelector,
+ rejectionHandler: RejectionHandler)
extends AbstractExecutorService {
if (parallelism <= 0)
@@ -247,8 +248,9 @@ private[akka] class AffinityPool(id: String,
override def toString: String =
s"${Logging.simpleName(this)}(id = $id, parallelism = $parallelism, affinityGroupSize = $affinityGroupSize, threadFactory = $threadFactory, idleCpuLevel = $idleCpuLevel, queueSelector = $queueSelector, rejectionHandler = $rejectionHandler)"
- private[this] final class AffinityPoolWorker(final val q: BoundedAffinityTaskQueue,
- final val idleStrategy: IdleStrategy)
+ private[this] final class AffinityPoolWorker(
+ final val q: BoundedAffinityTaskQueue,
+ final val idleStrategy: IdleStrategy)
extends Runnable {
final val thread: Thread = threadFactory.newThread(this)
@@ -313,9 +315,10 @@ private[akka] class AffinityPool(id: String,
private[akka] final class AffinityPoolConfigurator(config: Config, prerequisites: DispatcherPrerequisites)
extends ExecutorServiceConfigurator(config, prerequisites) {
- private val poolSize = ThreadPoolConfig.scaledPoolSize(config.getInt("parallelism-min"),
- config.getDouble("parallelism-factor"),
- config.getInt("parallelism-max"))
+ private val poolSize = ThreadPoolConfig.scaledPoolSize(
+ config.getInt("parallelism-min"),
+ config.getDouble("parallelism-factor"),
+ config.getInt("parallelism-max"))
private val taskQueueSize = config.getInt("task-queue-size")
private val idleCpuLevel = config
@@ -354,13 +357,14 @@ private[akka] final class AffinityPoolConfigurator(config: Config, prerequisites
new ExecutorServiceFactory {
override def createExecutorService: ExecutorService =
- new AffinityPool(id,
- poolSize,
- taskQueueSize,
- tf,
- idleCpuLevel,
- queueSelectorFactory.create(),
- rejectionHandlerFactory.create()).start()
+ new AffinityPool(
+ id,
+ poolSize,
+ taskQueueSize,
+ tf,
+ idleCpuLevel,
+ queueSelectorFactory.create(),
+ rejectionHandlerFactory.create()).start()
}
}
}
@@ -412,8 +416,9 @@ private[akka] final class FairDistributionHashCache(final val config: Config) ex
private[this] final val fairDistributionThreshold = config
.getInt("fair-work-distribution.threshold")
- .requiring(thr => 0 <= thr && thr <= MaxFairDistributionThreshold,
- s"fair-work-distribution.threshold must be between 0 and $MaxFairDistributionThreshold")
+ .requiring(
+ thr => 0 <= thr && thr <= MaxFairDistributionThreshold,
+ s"fair-work-distribution.threshold must be between 0 and $MaxFairDistributionThreshold")
override final def create(): QueueSelector =
new AtomicReference[ImmutableIntMap](ImmutableIntMap.empty) with QueueSelector {
diff --git a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
index 8265721e04..991fd30511 100644
--- a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
@@ -267,8 +267,9 @@ private[akka] final case class Failed(child: ActorRef, cause: Throwable, uid: In
with StashWhenWaitingForChildren
@SerialVersionUID(1L)
-private[akka] final case class DeathWatchNotification(actor: ActorRef,
- existenceConfirmed: Boolean,
- addressTerminated: Boolean)
+private[akka] final case class DeathWatchNotification(
+ actor: ActorRef,
+ existenceConfirmed: Boolean,
+ addressTerminated: Boolean)
extends SystemMessage
with DeadLetterSuppression
diff --git a/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala b/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala
index c6965ca77c..104c216ecf 100644
--- a/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala
+++ b/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala
@@ -33,12 +33,13 @@ class DeadLetterListener extends Actor {
val done = maxCount != Int.MaxValue && count >= maxCount
val doneMsg = if (done) ", no more dead letters will be logged" else ""
eventStream.publish(
- Info(rcp.path.toString,
- rcp.getClass,
- s"Message [${message.getClass.getName}] $origin to $rcp was not delivered. [$count] dead letters encountered$doneMsg. " +
- s"If this is not an expected behavior, then [$rcp] may have terminated unexpectedly, " +
- "This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' " +
- "and 'akka.log-dead-letters-during-shutdown'."))
+ Info(
+ rcp.path.toString,
+ rcp.getClass,
+ s"Message [${message.getClass.getName}] $origin to $rcp was not delivered. [$count] dead letters encountered$doneMsg. " +
+ s"If this is not an expected behavior, then [$rcp] may have terminated unexpectedly, " +
+ "This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' " +
+ "and 'akka.log-dead-letters-during-shutdown'."))
if (done) context.stop(self)
}
diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala
index 690458cfcb..3f65961064 100644
--- a/akka-actor/src/main/scala/akka/event/EventBus.scala
+++ b/akka-actor/src/main/scala/akka/event/EventBus.scala
@@ -266,8 +266,9 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier =>
protected def system: ActorSystem
- private class ManagedActorClassificationMappings(val seqNr: Int,
- val backing: Map[ActorRef, immutable.TreeSet[ActorRef]]) {
+ private class ManagedActorClassificationMappings(
+ val seqNr: Int,
+ val backing: Map[ActorRef, immutable.TreeSet[ActorRef]]) {
def get(monitored: ActorRef): immutable.TreeSet[ActorRef] = backing.getOrElse(monitored, empty)
diff --git a/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala b/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala
index aed20235d9..8cc3fac41d 100644
--- a/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala
+++ b/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala
@@ -34,9 +34,10 @@ protected[akka] class EventStreamUnsubscriber(eventStream: EventStream, debug: B
case Register(actor) =>
if (debug)
eventStream.publish(
- Logging.Debug(simpleName(getClass),
- getClass,
- s"watching $actor in order to unsubscribe from EventStream when it terminates"))
+ Logging.Debug(
+ simpleName(getClass),
+ getClass,
+ s"watching $actor in order to unsubscribe from EventStream when it terminates"))
context.watch(actor)
case UnregisterIfNoMoreSubscribedChannels(actor) if eventStream.hasSubscriptions(actor) =>
diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala
index b492e38f8d..173e8d1b33 100644
--- a/akka-actor/src/main/scala/akka/event/Logging.scala
+++ b/akka-actor/src/main/scala/akka/event/Logging.scala
@@ -76,10 +76,11 @@ trait LoggingBus extends ActorEventBus {
val level = levelFor(config.StdoutLogLevel).getOrElse {
// only log initialization errors directly with StandardOutLogger.print
StandardOutLogger.print(
- Error(new LoggerException,
- simpleName(this),
- this.getClass,
- "unknown akka.stdout-loglevel " + config.StdoutLogLevel))
+ Error(
+ new LoggerException,
+ simpleName(this),
+ this.getClass,
+ "unknown akka.stdout-loglevel " + config.StdoutLogLevel))
ErrorLevel
}
AllLogLevels.filter(level >= _).foreach(l => subscribe(StandardOutLogger, classFor(l)))
@@ -125,9 +126,10 @@ trait LoggingBus extends ActorEventBus {
})
.recover({
case e =>
- throw new ConfigurationException("Logger specified in config can't be loaded [" + loggerName +
- "] due to [" + e.toString + "]",
- e)
+ throw new ConfigurationException(
+ "Logger specified in config can't be loaded [" + loggerName +
+ "] due to [" + e.toString + "]",
+ e)
})
.get
}
@@ -186,10 +188,11 @@ trait LoggingBus extends ActorEventBus {
/**
* INTERNAL API
*/
- private def addLogger(system: ActorSystemImpl,
- clazz: Class[_ <: Actor],
- level: LogLevel,
- logName: String): ActorRef = {
+ private def addLogger(
+ system: ActorSystemImpl,
+ clazz: Class[_ <: Actor],
+ level: LogLevel,
+ logName: String): ActorRef = {
val name = "log" + LogExt(system).id() + "-" + simpleName(clazz)
val actor = system.systemActorOf(Props(clazz).withDispatcher(system.settings.LoggersDispatcher), name)
implicit def timeout = system.settings.LoggerStartTimeout
@@ -198,9 +201,10 @@ trait LoggingBus extends ActorEventBus {
catch {
case _: TimeoutException =>
publish(
- Warning(logName,
- this.getClass,
- "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)"))
+ Warning(
+ logName,
+ this.getClass,
+ "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)"))
"[TIMEOUT]"
}
if (response != LoggerInitialized)
@@ -761,12 +765,13 @@ object Logging {
case level => throw new IllegalArgumentException(s"Unsupported log level [$level]")
}
- def apply(level: LogLevel,
- logSource: String,
- logClass: Class[_],
- message: Any,
- mdc: MDC,
- marker: LogMarker): LogEvent = level match {
+ def apply(
+ level: LogLevel,
+ logSource: String,
+ logClass: Class[_],
+ message: Any,
+ mdc: MDC,
+ marker: LogMarker): LogEvent = level match {
case ErrorLevel => Error(logSource, logClass, message, mdc, marker)
case WarningLevel => Warning(logSource, logClass, message, mdc, marker)
case InfoLevel => Info(logSource, logClass, message, mdc, marker)
@@ -789,21 +794,23 @@ object Logging {
def this(logSource: String, logClass: Class[_], message: Any) = this(Error.NoCause, logSource, logClass, message)
override def level = ErrorLevel
}
- class Error2(override val cause: Throwable,
- logSource: String,
- logClass: Class[_],
- message: Any = "",
- override val mdc: MDC)
+ class Error2(
+ override val cause: Throwable,
+ logSource: String,
+ logClass: Class[_],
+ message: Any = "",
+ override val mdc: MDC)
extends Error(cause, logSource, logClass, message) {
def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC) =
this(Error.NoCause, logSource, logClass, message, mdc)
}
- class Error3(override val cause: Throwable,
- logSource: String,
- logClass: Class[_],
- message: Any,
- override val mdc: MDC,
- override val marker: LogMarker)
+ class Error3(
+ override val cause: Throwable,
+ logSource: String,
+ logClass: Class[_],
+ message: Any,
+ override val mdc: MDC,
+ override val marker: LogMarker)
extends Error2(cause, logSource, logClass, message, mdc)
with LogEventWithMarker {
def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) =
@@ -839,19 +846,21 @@ object Logging {
}
class Warning2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC)
extends Warning(logSource, logClass, message)
- class Warning3(logSource: String,
- logClass: Class[_],
- message: Any,
- override val mdc: MDC,
- override val marker: LogMarker)
+ class Warning3(
+ logSource: String,
+ logClass: Class[_],
+ message: Any,
+ override val mdc: MDC,
+ override val marker: LogMarker)
extends Warning2(logSource, logClass, message, mdc)
with LogEventWithMarker
- class Warning4(logSource: String,
- logClass: Class[_],
- message: Any,
- override val mdc: MDC,
- override val marker: LogMarker,
- override val cause: Throwable)
+ class Warning4(
+ logSource: String,
+ logClass: Class[_],
+ message: Any,
+ override val mdc: MDC,
+ override val marker: LogMarker,
+ override val cause: Throwable)
extends Warning2(logSource, logClass, message, mdc)
with LogEventWithMarker
with LogEventWithCause
@@ -875,11 +884,12 @@ object Logging {
}
class Info2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC)
extends Info(logSource, logClass, message)
- class Info3(logSource: String,
- logClass: Class[_],
- message: Any,
- override val mdc: MDC,
- override val marker: LogMarker)
+ class Info3(
+ logSource: String,
+ logClass: Class[_],
+ message: Any,
+ override val mdc: MDC,
+ override val marker: LogMarker)
extends Info2(logSource, logClass, message, mdc)
with LogEventWithMarker
object Info {
@@ -897,11 +907,12 @@ object Logging {
}
class Debug2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC)
extends Debug(logSource, logClass, message)
- class Debug3(logSource: String,
- logClass: Class[_],
- message: Any,
- override val mdc: MDC,
- override val marker: LogMarker)
+ class Debug3(
+ logSource: String,
+ logClass: Class[_],
+ message: Any,
+ override val mdc: MDC,
+ override val marker: LogMarker)
extends Debug2(logSource, logClass, message, mdc)
with LogEventWithMarker
object Debug {
@@ -979,33 +990,36 @@ object Logging {
case e: Error3 => // has marker
val f = if (event.cause == Error.NoCause) ErrorWithoutCauseWithMarkerFormat else ErrorFormatWithMarker
println(
- f.format(e.marker.name,
- timestamp(event),
- event.thread.getName,
- event.logSource,
- formatMDC(event.mdc),
- event.message,
- stackTraceFor(event.cause)))
+ f.format(
+ e.marker.name,
+ timestamp(event),
+ event.thread.getName,
+ event.logSource,
+ formatMDC(event.mdc),
+ event.message,
+ stackTraceFor(event.cause)))
case _ =>
val f = if (event.cause == Error.NoCause) ErrorFormatWithoutCause else ErrorFormat
println(
- f.format(timestamp(event),
- event.thread.getName,
- event.logSource,
- formatMDC(event.mdc),
- event.message,
- stackTraceFor(event.cause)))
+ f.format(
+ timestamp(event),
+ event.thread.getName,
+ event.logSource,
+ formatMDC(event.mdc),
+ event.message,
+ stackTraceFor(event.cause)))
}
def warning(event: Warning): Unit = event match {
case e: Warning3 => // has marker
println(
- WarningWithMarkerFormat.format(e.marker.name,
- timestamp(event),
- event.thread.getName,
- event.logSource,
- formatMDC(event.mdc),
- event.message))
+ WarningWithMarkerFormat.format(
+ e.marker.name,
+ timestamp(event),
+ event.thread.getName,
+ event.logSource,
+ formatMDC(event.mdc),
+ event.message))
case _ =>
println(
WarningFormat
@@ -1015,12 +1029,13 @@ object Logging {
def info(event: Info): Unit = event match {
case e: Info3 => // has marker
println(
- InfoWithMarkerFormat.format(e.marker.name,
- timestamp(event),
- event.thread.getName,
- event.logSource,
- formatMDC(event.mdc),
- event.message))
+ InfoWithMarkerFormat.format(
+ e.marker.name,
+ timestamp(event),
+ event.thread.getName,
+ event.logSource,
+ formatMDC(event.mdc),
+ event.message))
case _ =>
println(
InfoFormat
@@ -1030,12 +1045,13 @@ object Logging {
def debug(event: Debug): Unit = event match {
case e: Debug3 => // has marker
println(
- DebugWithMarkerFormat.format(e.marker.name,
- timestamp(event),
- event.thread.getName,
- event.logSource,
- formatMDC(event.mdc),
- event.message))
+ DebugWithMarkerFormat.format(
+ e.marker.name,
+ timestamp(event),
+ event.thread.getName,
+ event.logSource,
+ formatMDC(event.mdc),
+ event.message))
case _ =>
println(
DebugFormat
@@ -1622,10 +1638,11 @@ object LogMarker {
/**
* [[LoggingAdapter]] extension which adds Marker support.
*/
-class MarkerLoggingAdapter(override val bus: LoggingBus,
- override val logSource: String,
- override val logClass: Class[_],
- loggingFilter: LoggingFilter)
+class MarkerLoggingAdapter(
+ override val bus: LoggingBus,
+ override val logSource: String,
+ override val logClass: Class[_],
+ loggingFilter: LoggingFilter)
extends BusLogging(bus, logSource, logClass, loggingFilter) {
// TODO when breaking binary compatibility, these marker methods should become baked into LoggingAdapter itself
@@ -1872,10 +1889,11 @@ class MarkerLoggingAdapter(override val bus: LoggingBus,
}
}
-final class DiagnosticMarkerBusLoggingAdapter(override val bus: LoggingBus,
- override val logSource: String,
- override val logClass: Class[_],
- loggingFilter: LoggingFilter)
+final class DiagnosticMarkerBusLoggingAdapter(
+ override val bus: LoggingBus,
+ override val logSource: String,
+ override val logClass: Class[_],
+ loggingFilter: LoggingFilter)
extends MarkerLoggingAdapter(bus, logSource, logClass, loggingFilter)
with DiagnosticLoggingAdapter
@@ -1956,19 +1974,21 @@ object NoMarkerLogging extends MarkerLoggingAdapter(null, "source", classOf[Stri
final override def error(marker: LogMarker, cause: Throwable, message: String): Unit = ()
final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any): Unit = ()
final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = ()
- final override def error(marker: LogMarker,
- cause: Throwable,
- template: String,
- arg1: Any,
- arg2: Any,
- arg3: Any): Unit = ()
- final override def error(marker: LogMarker,
- cause: Throwable,
- template: String,
- arg1: Any,
- arg2: Any,
- arg3: Any,
- arg4: Any): Unit = ()
+ final override def error(
+ marker: LogMarker,
+ cause: Throwable,
+ template: String,
+ arg1: Any,
+ arg2: Any,
+ arg3: Any): Unit = ()
+ final override def error(
+ marker: LogMarker,
+ cause: Throwable,
+ template: String,
+ arg1: Any,
+ arg2: Any,
+ arg3: Any,
+ arg4: Any): Unit = ()
final override def error(marker: LogMarker, message: String): Unit = ()
final override def error(marker: LogMarker, template: String, arg1: Any): Unit = ()
final override def error(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit = ()
diff --git a/akka-actor/src/main/scala/akka/io/Dns.scala b/akka-actor/src/main/scala/akka/io/Dns.scala
index 4abf5b7be0..5e467ec79b 100644
--- a/akka-actor/src/main/scala/akka/io/Dns.scala
+++ b/akka-actor/src/main/scala/akka/io/Dns.scala
@@ -115,29 +115,26 @@ class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: Strin
@InternalApi
private[akka] def loadAsyncDns(managerName: String): ActorRef = {
// This can't pass in `this` as then AsyncDns would pick up the system settings
- asyncDns.computeIfAbsent(managerName,
- new JFunction[String, ActorRef] {
- override def apply(r: String): ActorRef = {
- val settings =
- new Settings(system.settings.config.getConfig("akka.io.dns"), "async-dns")
- val provider = system.dynamicAccess
- .getClassFor[DnsProvider](settings.ProviderObjectName)
- .get
- .newInstance()
- system.log.info("Creating async dns resolver {} with manager name {}",
- settings.Resolver,
- managerName)
- system.systemActorOf(
- props = Props(provider.managerClass,
- settings.Resolver,
- system,
- settings.ResolverConfig,
- provider.cache,
- settings.Dispatcher,
- provider).withDeploy(Deploy.local).withDispatcher(settings.Dispatcher),
- name = managerName)
- }
- })
+ asyncDns.computeIfAbsent(
+ managerName,
+ new JFunction[String, ActorRef] {
+ override def apply(r: String): ActorRef = {
+ val settings =
+ new Settings(system.settings.config.getConfig("akka.io.dns"), "async-dns")
+ val provider = system.dynamicAccess.getClassFor[DnsProvider](settings.ProviderObjectName).get.newInstance()
+ system.log.info("Creating async dns resolver {} with manager name {}", settings.Resolver, managerName)
+ system.systemActorOf(
+ props = Props(
+ provider.managerClass,
+ settings.Resolver,
+ system,
+ settings.ResolverConfig,
+ provider.cache,
+ settings.Dispatcher,
+ provider).withDeploy(Deploy.local).withDispatcher(settings.Dispatcher),
+ name = managerName)
+ }
+ })
}
/**
diff --git a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala
index 3def2c3c3f..ceefdc4124 100644
--- a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala
+++ b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala
@@ -83,9 +83,10 @@ private[io] object SelectionHandler {
def failureMessage: Any
}
- final case class WorkerForCommand(apiCommand: HasFailureMessage,
- commander: ActorRef,
- childProps: ChannelRegistry => Props)
+ final case class WorkerForCommand(
+ apiCommand: HasFailureMessage,
+ commander: ActorRef,
+ childProps: ChannelRegistry => Props)
extends NoSerializationVerificationNeeded
final case class Retry(command: WorkerForCommand, retriesLeft: Int) extends NoSerializationVerificationNeeded {
@@ -118,10 +119,11 @@ private[io] object SelectionHandler {
*/
private[io] final val connectionSupervisorStrategy: SupervisorStrategy =
new OneForOneStrategy()(SupervisorStrategy.stoppingStrategy.decider) {
- override def logFailure(context: ActorContext,
- child: ActorRef,
- cause: Throwable,
- decision: SupervisorStrategy.Directive): Unit =
+ override def logFailure(
+ context: ActorContext,
+ child: ActorRef,
+ cause: Throwable,
+ decision: SupervisorStrategy.Directive): Unit =
if (cause.isInstanceOf[DeathPactException]) {
try context.system.eventStream.publish {
Logging.Debug(child.path.toString, getClass, "Closed after handler termination")
@@ -129,9 +131,10 @@ private[io] object SelectionHandler {
} else super.logFailure(context, child, cause, decision)
}
- private class ChannelRegistryImpl(executionContext: ExecutionContext,
- settings: SelectionHandlerSettings,
- log: LoggingAdapter)
+ private class ChannelRegistryImpl(
+ executionContext: ExecutionContext,
+ settings: SelectionHandlerSettings,
+ log: LoggingAdapter)
extends ChannelRegistry {
private[this] val selector = SelectorProvider.provider.openSelector
private[this] val wakeUp = new AtomicBoolean(false)
@@ -323,10 +326,11 @@ private[io] class SelectionHandler(settings: SelectionHandlerSettings)
case _: Exception => SupervisorStrategy.Stop
}
new OneForOneStrategy()(stoppingDecider) {
- override def logFailure(context: ActorContext,
- child: ActorRef,
- cause: Throwable,
- decision: SupervisorStrategy.Directive): Unit =
+ override def logFailure(
+ context: ActorContext,
+ child: ActorRef,
+ cause: Throwable,
+ decision: SupervisorStrategy.Directive): Unit =
try {
val logMessage = cause match {
case e: ActorInitializationException if (e.getCause ne null) && (e.getCause.getMessage ne null) =>
@@ -348,9 +352,9 @@ private[io] class SelectionHandler(settings: SelectionHandlerSettings)
if (MaxChannelsPerSelector == -1 || childCount < MaxChannelsPerSelector) {
val newName = sequenceNumber.toString
sequenceNumber += 1
- val child = context.actorOf(props =
- cmd.childProps(registry).withDispatcher(WorkerDispatcher).withDeploy(Deploy.local),
- name = newName)
+ val child = context.actorOf(
+ props = cmd.childProps(registry).withDispatcher(WorkerDispatcher).withDeploy(Deploy.local),
+ name = newName)
childCount += 1
if (MaxChannelsPerSelector > 0) context.watch(child) // we don't need to watch if we aren't limited
} else {
diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala
index 2a5e6d4d31..86fe3534b8 100644
--- a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala
+++ b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala
@@ -56,9 +56,10 @@ object SimpleDnsCache {
* INTERNAL API
*/
@InternalApi
- private[io] class Cache[K, V](queue: immutable.SortedSet[ExpiryEntry[K]],
- cache: immutable.Map[K, CacheEntry[V]],
- clock: () => Long) {
+ private[io] class Cache[K, V](
+ queue: immutable.SortedSet[ExpiryEntry[K]],
+ cache: immutable.Map[K, CacheEntry[V]],
+ clock: () => Long) {
def get(name: K): Option[V] = {
for {
e <- cache.get(name)
diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala
index 60dbd1e40c..f971cb5139 100644
--- a/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala
+++ b/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala
@@ -35,8 +35,9 @@ class SimpleDnsManager(val ext: DnsExt)
}
private val cleanupTimer = cacheCleanup.map { _ =>
- val interval = Duration(ext.Settings.ResolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS),
- TimeUnit.MILLISECONDS)
+ val interval = Duration(
+ ext.Settings.ResolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS),
+ TimeUnit.MILLISECONDS)
system.scheduler.schedule(interval, interval, self, SimpleDnsManager.CacheCleanup)
}
diff --git a/akka-actor/src/main/scala/akka/io/Tcp.scala b/akka-actor/src/main/scala/akka/io/Tcp.scala
index 87f546ec8b..ace3d780d9 100644
--- a/akka-actor/src/main/scala/akka/io/Tcp.scala
+++ b/akka-actor/src/main/scala/akka/io/Tcp.scala
@@ -116,11 +116,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
* @param localAddress optionally specifies a specific address to bind to
* @param options Please refer to the `Tcp.SO` object for a list of all supported options.
*/
- final case class Connect(remoteAddress: InetSocketAddress,
- localAddress: Option[InetSocketAddress] = None,
- options: immutable.Traversable[SocketOption] = Nil,
- timeout: Option[FiniteDuration] = None,
- pullMode: Boolean = false)
+ final case class Connect(
+ remoteAddress: InetSocketAddress,
+ localAddress: Option[InetSocketAddress] = None,
+ options: immutable.Traversable[SocketOption] = Nil,
+ timeout: Option[FiniteDuration] = None,
+ pullMode: Boolean = false)
extends Command
/**
@@ -142,11 +143,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
*
* @param options Please refer to the `Tcp.SO` object for a list of all supported options.
*/
- final case class Bind(handler: ActorRef,
- localAddress: InetSocketAddress,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- pullMode: Boolean = false)
+ final case class Bind(
+ handler: ActorRef,
+ localAddress: InetSocketAddress,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ pullMode: Boolean = false)
extends Command
/**
@@ -697,11 +699,12 @@ object TcpMessage {
* @param timeout is the desired connection timeout, `null` means "no timeout"
* @param pullMode enables pull based reading from the connection
*/
- def connect(remoteAddress: InetSocketAddress,
- localAddress: InetSocketAddress,
- options: JIterable[SocketOption],
- timeout: FiniteDuration,
- pullMode: Boolean): Command =
+ def connect(
+ remoteAddress: InetSocketAddress,
+ localAddress: InetSocketAddress,
+ options: JIterable[SocketOption],
+ timeout: FiniteDuration,
+ pullMode: Boolean): Command =
Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode)
/**
@@ -716,11 +719,12 @@ object TcpMessage {
* @param timeout is the desired connection timeout, `null` means "no timeout"
* @param pullMode enables pull based reading from the connection
*/
- def connect(remoteAddress: InetSocketAddress,
- localAddress: InetSocketAddress,
- options: JIterable[SocketOption],
- timeout: java.time.Duration,
- pullMode: Boolean): Command = connect(remoteAddress, localAddress, options, timeout.asScala, pullMode)
+ def connect(
+ remoteAddress: InetSocketAddress,
+ localAddress: InetSocketAddress,
+ options: JIterable[SocketOption],
+ timeout: java.time.Duration,
+ pullMode: Boolean): Command = connect(remoteAddress, localAddress, options, timeout.asScala, pullMode)
/**
* Connect to the given `remoteAddress` without binding to a local address and without
@@ -750,11 +754,12 @@ object TcpMessage {
* @param pullMode enables pull based accepting and of connections and pull
* based reading from the accepted connections.
*/
- def bind(handler: ActorRef,
- endpoint: InetSocketAddress,
- backlog: Int,
- options: JIterable[SocketOption],
- pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode)
+ def bind(
+ handler: ActorRef,
+ endpoint: InetSocketAddress,
+ backlog: Int,
+ options: JIterable[SocketOption],
+ pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode)
/**
* Open a listening socket without specifying options.
diff --git a/akka-actor/src/main/scala/akka/io/TcpConnection.scala b/akka-actor/src/main/scala/akka/io/TcpConnection.scala
index 466f4d8805..c973d41fa1 100644
--- a/akka-actor/src/main/scala/akka/io/TcpConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpConnection.scala
@@ -112,9 +112,10 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
}
/** connection is closing but a write has to be finished first */
- def closingWithPendingWrite(info: ConnectionInfo,
- closeCommander: Option[ActorRef],
- closedEvent: ConnectionClosed): Receive = {
+ def closingWithPendingWrite(
+ info: ConnectionInfo,
+ closeCommander: Option[ActorRef],
+ closedEvent: ConnectionClosed): Receive = {
case SuspendReading => suspendReading(info)
case ResumeReading => resumeReading(info)
case ChannelReadable => doRead(info, closeCommander)
@@ -201,9 +202,10 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
// AUXILIARIES and IMPLEMENTATION
/** used in subclasses to start the common machinery above once a channel is connected */
- def completeConnect(registration: ChannelRegistration,
- commander: ActorRef,
- options: immutable.Traversable[SocketOption]): Unit = {
+ def completeConnect(
+ registration: ChannelRegistration,
+ commander: ActorRef,
+ options: immutable.Traversable[SocketOption]): Unit = {
this.registration = Some(registration)
// Turn off Nagle's algorithm by default
@@ -216,8 +218,9 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
}
options.foreach(_.afterConnect(channel.socket))
- commander ! Connected(channel.socket.getRemoteSocketAddress.asInstanceOf[InetSocketAddress],
- channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])
+ commander ! Connected(
+ channel.socket.getRemoteSocketAddress.asInstanceOf[InetSocketAddress],
+ channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])
context.setReceiveTimeout(RegisterTimeout)
@@ -434,11 +437,12 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
}
}
- class PendingBufferWrite(val commander: ActorRef,
- remainingData: ByteString,
- ack: Any,
- buffer: ByteBuffer,
- tail: WriteCommand)
+ class PendingBufferWrite(
+ val commander: ActorRef,
+ remainingData: ByteString,
+ ack: Any,
+ buffer: ByteBuffer,
+ tail: WriteCommand)
extends PendingWrite {
def doWrite(info: ConnectionInfo): PendingWrite = {
@@ -472,20 +476,22 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
def release(): Unit = bufferPool.release(buffer)
}
- def PendingWriteFile(commander: ActorRef,
- filePath: Path,
- offset: Long,
- count: Long,
- ack: Event,
- tail: WriteCommand): PendingWriteFile =
+ def PendingWriteFile(
+ commander: ActorRef,
+ filePath: Path,
+ offset: Long,
+ count: Long,
+ ack: Event,
+ tail: WriteCommand): PendingWriteFile =
new PendingWriteFile(commander, FileChannel.open(filePath), offset, count, ack, tail)
- class PendingWriteFile(val commander: ActorRef,
- fileChannel: FileChannel,
- offset: Long,
- remaining: Long,
- ack: Event,
- tail: WriteCommand)
+ class PendingWriteFile(
+ val commander: ActorRef,
+ fileChannel: FileChannel,
+ offset: Long,
+ remaining: Long,
+ ack: Event,
+ tail: WriteCommand)
extends PendingWrite
with Runnable {
@@ -533,10 +539,11 @@ private[io] object TcpConnection {
/**
* Groups required connection-related data that are only available once the connection has been fully established.
*/
- final case class ConnectionInfo(registration: ChannelRegistration,
- handler: ActorRef,
- keepOpenOnPeerClosed: Boolean,
- useResumeWriting: Boolean)
+ final case class ConnectionInfo(
+ registration: ChannelRegistration,
+ handler: ActorRef,
+ keepOpenOnPeerClosed: Boolean,
+ useResumeWriting: Boolean)
// INTERNAL MESSAGES
diff --git a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala
index da4023a5b7..b9596c180f 100644
--- a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala
@@ -15,12 +15,13 @@ import akka.io.Inet.SocketOption
*
* INTERNAL API
*/
-private[io] class TcpIncomingConnection(_tcp: TcpExt,
- _channel: SocketChannel,
- registry: ChannelRegistry,
- bindHandler: ActorRef,
- options: immutable.Traversable[SocketOption],
- readThrottling: Boolean)
+private[io] class TcpIncomingConnection(
+ _tcp: TcpExt,
+ _channel: SocketChannel,
+ registry: ChannelRegistry,
+ bindHandler: ActorRef,
+ options: immutable.Traversable[SocketOption],
+ readThrottling: Boolean)
extends TcpConnection(_tcp, _channel, readThrottling) {
signDeathPact(bindHandler)
diff --git a/akka-actor/src/main/scala/akka/io/TcpListener.scala b/akka-actor/src/main/scala/akka/io/TcpListener.scala
index a074e2f143..8fd7800b4a 100644
--- a/akka-actor/src/main/scala/akka/io/TcpListener.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpListener.scala
@@ -32,11 +32,12 @@ private[io] object TcpListener {
/**
* INTERNAL API
*/
-private[io] class TcpListener(selectorRouter: ActorRef,
- tcp: TcpExt,
- channelRegistry: ChannelRegistry,
- bindCommander: ActorRef,
- bind: Bind)
+private[io] class TcpListener(
+ selectorRouter: ActorRef,
+ tcp: TcpExt,
+ channelRegistry: ChannelRegistry,
+ bindCommander: ActorRef,
+ bind: Bind)
extends Actor
with ActorLogging
with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
diff --git a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala
index 2deadf7481..5ff921c082 100644
--- a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala
@@ -21,13 +21,15 @@ import akka.io.Tcp._
*
* INTERNAL API
*/
-private[io] class TcpOutgoingConnection(_tcp: TcpExt,
- channelRegistry: ChannelRegistry,
- commander: ActorRef,
- connect: Connect)
- extends TcpConnection(_tcp,
- SocketChannel.open().configureBlocking(false).asInstanceOf[SocketChannel],
- connect.pullMode) {
+private[io] class TcpOutgoingConnection(
+ _tcp: TcpExt,
+ channelRegistry: ChannelRegistry,
+ commander: ActorRef,
+ connect: Connect)
+ extends TcpConnection(
+ _tcp,
+ SocketChannel.open().configureBlocking(false).asInstanceOf[SocketChannel],
+ connect.pullMode) {
import TcpOutgoingConnection._
import context._
diff --git a/akka-actor/src/main/scala/akka/io/Udp.scala b/akka-actor/src/main/scala/akka/io/Udp.scala
index 06aea64137..2481ae4042 100644
--- a/akka-actor/src/main/scala/akka/io/Udp.scala
+++ b/akka-actor/src/main/scala/akka/io/Udp.scala
@@ -94,9 +94,10 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider {
* The listener actor for the newly bound port will reply with a [[Bound]]
* message, or the manager will reply with a [[CommandFailed]] message.
*/
- final case class Bind(handler: ActorRef,
- localAddress: InetSocketAddress,
- options: immutable.Traversable[SocketOption] = Nil)
+ final case class Bind(
+ handler: ActorRef,
+ localAddress: InetSocketAddress,
+ options: immutable.Traversable[SocketOption] = Nil)
extends Command
/**
diff --git a/akka-actor/src/main/scala/akka/io/UdpConnected.scala b/akka-actor/src/main/scala/akka/io/UdpConnected.scala
index ba1acd7cec..a41cab76ff 100644
--- a/akka-actor/src/main/scala/akka/io/UdpConnected.scala
+++ b/akka-actor/src/main/scala/akka/io/UdpConnected.scala
@@ -71,9 +71,10 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide
* has been successfully enqueued to the O/S kernel.
*/
final case class Send(payload: ByteString, ack: Any) extends Command {
- require(ack
- != null,
- "ack must be non-null. Use NoAck if you don't want acks.")
+ require(
+ ack
+ != null,
+ "ack must be non-null. Use NoAck if you don't want acks.")
def wantsAck: Boolean = !ack.isInstanceOf[NoAck]
}
@@ -87,10 +88,11 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide
* which is restricted to sending to and receiving from the given `remoteAddress`.
* All received datagrams will be sent to the designated `handler` actor.
*/
- final case class Connect(handler: ActorRef,
- remoteAddress: InetSocketAddress,
- localAddress: Option[InetSocketAddress] = None,
- options: immutable.Traversable[SocketOption] = Nil)
+ final case class Connect(
+ handler: ActorRef,
+ remoteAddress: InetSocketAddress,
+ localAddress: Option[InetSocketAddress] = None,
+ options: immutable.Traversable[SocketOption] = Nil)
extends Command
/**
@@ -153,8 +155,9 @@ class UdpConnectedExt(system: ExtendedActorSystem) extends IO.Extension {
val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp-connected"))
val manager: ActorRef = {
- system.systemActorOf(props = Props(classOf[UdpConnectedManager], this).withDeploy(Deploy.local),
- name = "IO-UDP-CONN")
+ system.systemActorOf(
+ props = Props(classOf[UdpConnectedManager], this).withDeploy(Deploy.local),
+ name = "IO-UDP-CONN")
}
/**
@@ -179,10 +182,11 @@ object UdpConnectedMessage {
* which is restricted to sending to and receiving from the given `remoteAddress`.
* All received datagrams will be sent to the designated `handler` actor.
*/
- def connect(handler: ActorRef,
- remoteAddress: InetSocketAddress,
- localAddress: InetSocketAddress,
- options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options)
+ def connect(
+ handler: ActorRef,
+ remoteAddress: InetSocketAddress,
+ localAddress: InetSocketAddress,
+ options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options)
/**
* Connect without specifying the `localAddress`.
diff --git a/akka-actor/src/main/scala/akka/io/UdpConnection.scala b/akka-actor/src/main/scala/akka/io/UdpConnection.scala
index aeafb51268..ea0fd55138 100644
--- a/akka-actor/src/main/scala/akka/io/UdpConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/UdpConnection.scala
@@ -20,10 +20,11 @@ import akka.io.UdpConnected._
/**
* INTERNAL API
*/
-private[io] class UdpConnection(udpConn: UdpConnectedExt,
- channelRegistry: ChannelRegistry,
- commander: ActorRef,
- connect: Connect)
+private[io] class UdpConnection(
+ udpConn: UdpConnectedExt,
+ channelRegistry: ChannelRegistry,
+ commander: ActorRef,
+ connect: Connect)
extends Actor
with ActorLogging
with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
@@ -158,10 +159,11 @@ private[io] class UdpConnection(udpConn: UdpConnectedExt,
thunk
} catch {
case NonFatal(e) =>
- log.debug("Failure while connecting UDP channel to remote address [{}] local address [{}]: {}",
- remoteAddress,
- localAddress.getOrElse("undefined"),
- e)
+ log.debug(
+ "Failure while connecting UDP channel to remote address [{}] local address [{}]: {}",
+ remoteAddress,
+ localAddress.getOrElse("undefined"),
+ e)
commander ! CommandFailed(connect)
context.stop(self)
}
diff --git a/akka-actor/src/main/scala/akka/io/UdpSender.scala b/akka-actor/src/main/scala/akka/io/UdpSender.scala
index cdef62689c..233d1fdc1f 100644
--- a/akka-actor/src/main/scala/akka/io/UdpSender.scala
+++ b/akka-actor/src/main/scala/akka/io/UdpSender.scala
@@ -15,10 +15,11 @@ import akka.actor._
/**
* INTERNAL API
*/
-private[io] class UdpSender(val udp: UdpExt,
- channelRegistry: ChannelRegistry,
- commander: ActorRef,
- options: immutable.Traversable[SocketOption])
+private[io] class UdpSender(
+ val udp: UdpExt,
+ channelRegistry: ChannelRegistry,
+ commander: ActorRef,
+ options: immutable.Traversable[SocketOption])
extends Actor
with ActorLogging
with WithUdpSend
diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala
index 18f4943139..28f05d7fd1 100644
--- a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala
+++ b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala
@@ -68,12 +68,13 @@ private[dns] object CNameRecord {
}
}
-final case class SRVRecord(override val name: String,
- override val ttl: Ttl,
- priority: Int,
- weight: Int,
- port: Int,
- target: String)
+final case class SRVRecord(
+ override val name: String,
+ override val ttl: Ttl,
+ priority: Int,
+ weight: Int,
+ port: Int,
+ target: String)
extends ResourceRecord(name, ttl, RecordType.SRV.code, RecordClass.IN.code) {}
/**
@@ -94,11 +95,12 @@ private[dns] object SRVRecord {
}
}
-final case class UnknownRecord(override val name: String,
- override val ttl: Ttl,
- override val recType: Short,
- override val recClass: Short,
- data: ByteString)
+final case class UnknownRecord(
+ override val name: String,
+ override val ttl: Ttl,
+ override val recType: Short,
+ override val recClass: Short,
+ data: ByteString)
extends ResourceRecord(name, ttl, recType, recClass) {}
/**
@@ -111,12 +113,13 @@ private[dns] object UnknownRecord {
* INTERNAL API
*/
@InternalApi
- def parseBody(name: String,
- ttl: Ttl,
- recType: Short,
- recClass: Short,
- @unused length: Short,
- it: ByteIterator): UnknownRecord =
+ def parseBody(
+ name: String,
+ ttl: Ttl,
+ recType: Short,
+ recClass: Short,
+ @unused length: Short,
+ it: ByteIterator): UnknownRecord =
UnknownRecord(name, ttl, recType, recClass, it.toByteString)
}
diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala
index 92cae34cb5..800d52d8a5 100644
--- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala
+++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala
@@ -21,9 +21,10 @@ import scala.collection.immutable
*/
@InternalApi class AsyncDnsCache extends Dns with PeriodicCacheCleanup with NoSerializationVerificationNeeded {
private val cacheRef = new AtomicReference(
- new Cache[(String, RequestType), Resolved](immutable.SortedSet()(expiryEntryOrdering()),
- immutable.Map(),
- () => clock))
+ new Cache[(String, RequestType), Resolved](
+ immutable.SortedSet()(expiryEntryOrdering()),
+ immutable.Map(),
+ () => clock))
private val nanoBase = System.nanoTime()
diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala
index 9c1d95f666..f0806503dc 100644
--- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala
+++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala
@@ -33,12 +33,13 @@ private[akka] object AsyncDnsManager {
* INTERNAL API
*/
@InternalApi
-private[io] final class AsyncDnsManager(name: String,
- system: ExtendedActorSystem,
- resolverConfig: Config,
- cache: Dns,
- dispatcher: String,
- provider: DnsProvider)
+private[io] final class AsyncDnsManager(
+ name: String,
+ system: ExtendedActorSystem,
+ resolverConfig: Config,
+ cache: Dns,
+ dispatcher: String,
+ provider: DnsProvider)
extends Actor
with RequiresMessageQueue[UnboundedMessageQueueSemantics]
with ActorLogging
@@ -50,12 +51,13 @@ private[io] final class AsyncDnsManager(name: String,
* Ctr expected by the DnsExt for all DnsMangers
*/
def this(ext: DnsExt) =
- this(ext.Settings.Resolver,
- ext.system,
- ext.Settings.ResolverConfig,
- ext.cache,
- ext.Settings.Dispatcher,
- ext.provider)
+ this(
+ ext.Settings.Resolver,
+ ext.system,
+ ext.Settings.ResolverConfig,
+ ext.cache,
+ ext.Settings.Dispatcher,
+ ext.provider)
implicit val ec = context.dispatcher
diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala
index d3b5acb310..68c5a1ff1c 100644
--- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala
+++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala
@@ -25,9 +25,10 @@ import scala.util.control.NonFatal
* INTERNAL API
*/
@InternalApi
-private[io] final class AsyncDnsResolver(settings: DnsSettings,
- cache: AsyncDnsCache,
- clientFactory: (ActorRefFactory, List[InetSocketAddress]) => List[ActorRef])
+private[io] final class AsyncDnsResolver(
+ settings: DnsSettings,
+ cache: AsyncDnsCache,
+ clientFactory: (ActorRefFactory, List[InetSocketAddress]) => List[ActorRef])
extends Actor
with ActorLogging {
@@ -40,10 +41,11 @@ private[io] final class AsyncDnsResolver(settings: DnsSettings,
val nameServers = settings.NameServers
- log.debug("Using name servers [{}] and search domains [{}] with ndots={}",
- nameServers,
- settings.SearchDomains,
- settings.NDots)
+ log.debug(
+ "Using name servers [{}] and search domains [{}] with ndots={}",
+ nameServers,
+ settings.SearchDomains,
+ settings.NDots)
private var requestId: Short = 0
private def nextId(): Short = {
@@ -72,9 +74,10 @@ private[io] final class AsyncDnsResolver(settings: DnsSettings,
}
}
- private def resolveWithResolvers(name: String,
- requestType: RequestType,
- resolvers: List[ActorRef]): Future[DnsProtocol.Resolved] =
+ private def resolveWithResolvers(
+ name: String,
+ requestType: RequestType,
+ resolvers: List[ActorRef]): Future[DnsProtocol.Resolved] =
if (isInetAddress(name)) {
Future.fromTry {
Try {
@@ -107,9 +110,10 @@ private[io] final class AsyncDnsResolver(settings: DnsSettings,
result
}
- private def resolveWithSearch(name: String,
- requestType: RequestType,
- resolver: ActorRef): Future[DnsProtocol.Resolved] = {
+ private def resolveWithSearch(
+ name: String,
+ requestType: RequestType,
+ resolver: ActorRef): Future[DnsProtocol.Resolved] = {
if (settings.SearchDomains.nonEmpty) {
val nameWithSearch = settings.SearchDomains.map(sd => name + "." + sd)
// ndots is a heuristic used to try and work out whether the name passed in is a fully qualified domain name,
@@ -131,9 +135,10 @@ private[io] final class AsyncDnsResolver(settings: DnsSettings,
}
}
- private def resolveFirst(searchNames: List[String],
- requestType: RequestType,
- resolver: ActorRef): Future[DnsProtocol.Resolved] = {
+ private def resolveFirst(
+ searchNames: List[String],
+ requestType: RequestType,
+ resolver: ActorRef): Future[DnsProtocol.Resolved] = {
searchNames match {
case searchName :: Nil =>
resolve(searchName, requestType, resolver)
diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala
index cfca83da78..32f1443f43 100644
--- a/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala
+++ b/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala
@@ -142,11 +142,12 @@ import scala.concurrent.duration._
def createTcpClient() = {
context.actorOf(
- BackoffSupervisor.props(Props(classOf[TcpDnsClient], tcp, ns, self),
- childName = "tcpDnsClient",
- minBackoff = 10.millis,
- maxBackoff = 20.seconds,
- randomFactor = 0.1),
+ BackoffSupervisor.props(
+ Props(classOf[TcpDnsClient], tcp, ns, self),
+ childName = "tcpDnsClient",
+ minBackoff = 10.millis,
+ maxBackoff = 20.seconds,
+ randomFactor = 0.1),
"tcpDnsClientSupervisor")
}
}
diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala
index e7e0e99a54..ae36d894a6 100644
--- a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala
+++ b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala
@@ -76,13 +76,14 @@ private[internal] case class MessageFlags(flags: Short) extends AnyVal {
*/
@InternalApi
private[internal] object MessageFlags {
- def apply(answer: Boolean = false,
- opCode: OpCode.Value = OpCode.QUERY,
- authoritativeAnswer: Boolean = false,
- truncated: Boolean = false,
- recursionDesired: Boolean = true,
- recursionAvailable: Boolean = false,
- responseCode: ResponseCode.Value = ResponseCode.SUCCESS): MessageFlags = {
+ def apply(
+ answer: Boolean = false,
+ opCode: OpCode.Value = OpCode.QUERY,
+ authoritativeAnswer: Boolean = false,
+ truncated: Boolean = false,
+ recursionDesired: Boolean = true,
+ recursionAvailable: Boolean = false,
+ responseCode: ResponseCode.Value = ResponseCode.SUCCESS): MessageFlags = {
new MessageFlags(
((if (answer) 0x8000 else 0) |
(opCode.id << 11) |
@@ -98,12 +99,13 @@ private[internal] object MessageFlags {
* INTERNAL API
*/
@InternalApi
-private[internal] case class Message(id: Short,
- flags: MessageFlags,
- questions: Seq[Question] = Seq.empty,
- answerRecs: Seq[ResourceRecord] = Seq.empty,
- authorityRecs: Seq[ResourceRecord] = Seq.empty,
- additionalRecs: Seq[ResourceRecord] = Seq.empty) {
+private[internal] case class Message(
+ id: Short,
+ flags: MessageFlags,
+ questions: Seq[Question] = Seq.empty,
+ answerRecs: Seq[ResourceRecord] = Seq.empty,
+ authorityRecs: Seq[ResourceRecord] = Seq.empty,
+ additionalRecs: Seq[ResourceRecord] = Seq.empty) {
def write(): ByteString = {
val ret = ByteString.newBuilder
write(ret)
diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala
index 28295231dc..20ffbcc192 100644
--- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala
+++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala
@@ -280,9 +280,10 @@ object AskableActorRef {
/**
* INTERNAL API
*/
- @InternalApi private[akka] def negativeTimeoutException(recipient: Any,
- message: Any,
- sender: ActorRef): IllegalArgumentException = {
+ @InternalApi private[akka] def negativeTimeoutException(
+ recipient: Any,
+ message: Any,
+ sender: ActorRef): IllegalArgumentException = {
new IllegalArgumentException(
s"Timeout length must be positive, question not sent to [$recipient]. " +
messagePartOfException(message, sender))
@@ -291,9 +292,10 @@ object AskableActorRef {
/**
* INTERNAL API
*/
- @InternalApi private[akka] def recipientTerminatedException(recipient: Any,
- message: Any,
- sender: ActorRef): AskTimeoutException = {
+ @InternalApi private[akka] def recipientTerminatedException(
+ recipient: Any,
+ message: Any,
+ sender: ActorRef): AskTimeoutException = {
new AskTimeoutException(
s"Recipient [$recipient] had already been terminated. " +
messagePartOfException(message, sender))
@@ -302,9 +304,10 @@ object AskableActorRef {
/**
* INTERNAL API
*/
- @InternalApi private[akka] def unsupportedRecipientType(recipient: Any,
- message: Any,
- sender: ActorRef): IllegalArgumentException = {
+ @InternalApi private[akka] def unsupportedRecipientType(
+ recipient: Any,
+ message: Any,
+ sender: ActorRef): IllegalArgumentException = {
new IllegalArgumentException(
s"Unsupported recipient type, question not sent to [$recipient]. " +
messagePartOfException(message, sender))
@@ -497,9 +500,10 @@ final class ExplicitlyAskableActorSelection(val actorSel: ActorSelection) extend
*
* INTERNAL API
*/
-private[akka] final class PromiseActorRef private (val provider: ActorRefProvider,
- val result: Promise[Any],
- _mcn: String)
+private[akka] final class PromiseActorRef private (
+ val provider: ActorRefProvider,
+ val result: Promise[Any],
+ _mcn: String)
extends MinimalActorRef {
import AbstractPromiseActorRef.{ stateOffset, watchedByOffset }
import PromiseActorRef._
@@ -666,12 +670,13 @@ private[akka] object PromiseActorRef {
private val ActorStopResult = Failure(ActorKilledException("Stopped"))
private val defaultOnTimeout: String => Throwable = str => new AskTimeoutException(str)
- def apply(provider: ActorRefProvider,
- timeout: Timeout,
- targetName: Any,
- messageClassName: String,
- sender: ActorRef = Actor.noSender,
- onTimeout: String => Throwable = defaultOnTimeout): PromiseActorRef = {
+ def apply(
+ provider: ActorRefProvider,
+ timeout: Timeout,
+ targetName: Any,
+ messageClassName: String,
+ sender: ActorRef = Actor.noSender,
+ onTimeout: String => Throwable = defaultOnTimeout): PromiseActorRef = {
val result = Promise[Any]()
val scheduler = provider.guardian.underlying.system.scheduler
val a = new PromiseActorRef(provider, result, messageClassName)
diff --git a/akka-actor/src/main/scala/akka/pattern/Backoff.scala b/akka-actor/src/main/scala/akka/pattern/Backoff.scala
index 611016a3c9..7490f24694 100644
--- a/akka-actor/src/main/scala/akka/pattern/Backoff.scala
+++ b/akka-actor/src/main/scala/akka/pattern/Backoff.scala
@@ -69,12 +69,13 @@ object Backoff {
*
*/
@deprecated("Use BackoffOpts.onFailure instead", "2.5.22")
- def onFailure(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxNrOfRetries: Int): BackoffOptions =
+ def onFailure(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxNrOfRetries: Int): BackoffOptions =
BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
.withMaxNrOfRetries(maxNrOfRetries)
@@ -125,11 +126,12 @@ object Backoff {
* In order to skip this additional delay pass in `0`.
*/
@deprecated("Use BackoffOpts.onFailure instead", "2.5.22")
- def onFailure(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): BackoffOptions =
+ def onFailure(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): BackoffOptions =
BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
/**
@@ -183,12 +185,13 @@ object Backoff {
*/
@Deprecated
@deprecated("Use BackoffOpts.onFailure instead", "2.5.22")
- def onFailure(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxNrOfRetries: Int): BackoffOptions =
+ def onFailure(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxNrOfRetries: Int): BackoffOptions =
onFailure(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, maxNrOfRetries)
/**
@@ -239,11 +242,12 @@ object Backoff {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts maxNrOfRetries instead.", "2.5.17")
- def onFailure(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double): BackoffOptions =
+ def onFailure(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double): BackoffOptions =
onFailure(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, -1)
/**
@@ -303,12 +307,13 @@ object Backoff {
* In order to restart infinitely pass in `-1`.
*/
@deprecated("Use BackoffOpts.onStop instead", "2.5.22")
- def onStop(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxNrOfRetries: Int): BackoffOptions =
+ def onStop(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxNrOfRetries: Int): BackoffOptions =
BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
.withMaxNrOfRetries(maxNrOfRetries)
@@ -366,11 +371,12 @@ object Backoff {
* In order to skip this additional delay pass in `0`.
*/
@deprecated("Use BackoffOpts.onStop instead", "2.5.22")
- def onStop(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): BackoffOptions =
+ def onStop(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): BackoffOptions =
BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
/**
@@ -431,12 +437,13 @@ object Backoff {
*/
@Deprecated
@deprecated("Use BackoffOpts.onStop instead", "2.5.22")
- def onStop(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxNrOfRetries: Int): BackoffOptions =
+ def onStop(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxNrOfRetries: Int): BackoffOptions =
onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, maxNrOfRetries)
/**
@@ -494,11 +501,12 @@ object Backoff {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts maxNrOfRetries instead.", "2.5.17")
- def onStop(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double): BackoffOptions =
+ def onStop(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double): BackoffOptions =
onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, -1)
}
@@ -557,17 +565,17 @@ trait BackoffOptions {
private[akka] def props: Props
}
-private final case class BackoffOptionsImpl(backoffType: BackoffType = RestartImpliesFailure,
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- reset: Option[BackoffReset] = None,
- supervisorStrategy: OneForOneStrategy =
- OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider),
- replyWhileStopped: Option[Any] = None,
- finalStopMessage: Option[Any => Boolean] = None)
+private final case class BackoffOptionsImpl(
+ backoffType: BackoffType = RestartImpliesFailure,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ reset: Option[BackoffReset] = None,
+ supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider),
+ replyWhileStopped: Option[Any] = None,
+ finalStopMessage: Option[Any => Boolean] = None)
extends akka.pattern.BackoffOptions {
val backoffReset = reset.getOrElse(AutoReset(minBackoff))
@@ -598,26 +606,28 @@ private final case class BackoffOptionsImpl(backoffType: BackoffType = RestartIm
//onFailure method in companion object
case RestartImpliesFailure =>
Props(
- new BackoffOnRestartSupervisor(childProps,
- childName,
- minBackoff,
- maxBackoff,
- backoffReset,
- randomFactor,
- supervisorStrategy,
- replyWhileStopped))
+ new BackoffOnRestartSupervisor(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ backoffReset,
+ randomFactor,
+ supervisorStrategy,
+ replyWhileStopped))
//onStop method in companion object
case StopImpliesFailure =>
Props(
- new BackoffOnStopSupervisor(childProps,
- childName,
- minBackoff,
- maxBackoff,
- backoffReset,
- randomFactor,
- supervisorStrategy,
- replyWhileStopped,
- finalStopMessage))
+ new BackoffOnStopSupervisor(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ backoffReset,
+ randomFactor,
+ supervisorStrategy,
+ replyWhileStopped,
+ finalStopMessage))
}
}
}
diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala
index 92098fe8b0..446a0110a1 100644
--- a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala
+++ b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala
@@ -62,11 +62,12 @@ object BackoffOpts {
* random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay.
* In order to skip this additional delay pass in `0`.
*/
- def onFailure(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): BackoffOnFailureOptions =
+ def onFailure(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): BackoffOnFailureOptions =
BackoffOnFailureOptionsImpl(childProps, childName, minBackoff, maxBackoff, randomFactor)
/**
@@ -115,11 +116,12 @@ object BackoffOpts {
* random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay.
* In order to skip this additional delay pass in `0`.
*/
- def onFailure(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double): BackoffOnFailureOptions =
+ def onFailure(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double): BackoffOnFailureOptions =
onFailure(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor)
/**
@@ -175,11 +177,12 @@ object BackoffOpts {
* random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay.
* In order to skip this additional delay pass in `0`.
*/
- def onStop(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): BackoffOnStopOptions =
+ def onStop(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): BackoffOnStopOptions =
BackoffOnStopOptionsImpl(childProps, childName, minBackoff, maxBackoff, randomFactor)
/**
@@ -235,11 +238,12 @@ object BackoffOpts {
* random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay.
* In order to skip this additional delay pass in `0`.
*/
- def onStop(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double): BackoffOnStopOptions =
+ def onStop(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double): BackoffOnStopOptions =
onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor)
}
@@ -322,16 +326,16 @@ sealed trait BackoffOnStopOptions extends ExtendedBackoffOptions[BackoffOnStopOp
@DoNotInherit
sealed trait BackoffOnFailureOptions extends ExtendedBackoffOptions[BackoffOnFailureOptions]
-private final case class BackoffOnStopOptionsImpl[T](childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- reset: Option[BackoffReset] = None,
- supervisorStrategy: OneForOneStrategy =
- OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider),
- replyWhileStopped: Option[Any] = None,
- finalStopMessage: Option[Any => Boolean] = None)
+private final case class BackoffOnStopOptionsImpl[T](
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ reset: Option[BackoffReset] = None,
+ supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider),
+ replyWhileStopped: Option[Any] = None,
+ finalStopMessage: Option[Any => Boolean] = None)
extends BackoffOnStopOptions {
private val backoffReset = reset.getOrElse(AutoReset(minBackoff))
@@ -362,15 +366,16 @@ private final case class BackoffOnStopOptionsImpl[T](childProps: Props,
}
Props(
- new BackoffOnStopSupervisor(childProps,
- childName,
- minBackoff,
- maxBackoff,
- backoffReset,
- randomFactor,
- supervisorStrategy,
- replyWhileStopped,
- finalStopMessage))
+ new BackoffOnStopSupervisor(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ backoffReset,
+ randomFactor,
+ supervisorStrategy,
+ replyWhileStopped,
+ finalStopMessage))
}
}
@@ -406,14 +411,15 @@ private final case class BackoffOnFailureOptionsImpl[T](
}
Props(
- new BackoffOnRestartSupervisor(childProps,
- childName,
- minBackoff,
- maxBackoff,
- backoffReset,
- randomFactor,
- supervisorStrategy,
- replyWhileStopped))
+ new BackoffOnRestartSupervisor(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ backoffReset,
+ randomFactor,
+ supervisorStrategy,
+ replyWhileStopped))
}
}
diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala
index 1700d0c235..f8951ba490 100644
--- a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala
+++ b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala
@@ -34,17 +34,19 @@ object BackoffSupervisor {
* In order to skip this additional delay pass in `0`.
*/
@deprecated("Use props with BackoffOpts instead", since = "2.5.22")
- def props(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): Props = {
- propsWithSupervisorStrategy(childProps,
- childName,
- minBackoff,
- maxBackoff,
- randomFactor,
- SupervisorStrategy.defaultStrategy)
+ def props(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): Props = {
+ propsWithSupervisorStrategy(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ SupervisorStrategy.defaultStrategy)
}
/**
@@ -68,12 +70,13 @@ object BackoffSupervisor {
* In order to restart infinitely pass in `-1`.
*/
@deprecated("Use props with BackoffOpts instead", since = "2.5.22")
- def props(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxNrOfRetries: Int): Props = {
+ def props(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxNrOfRetries: Int): Props = {
val supervisionStrategy = SupervisorStrategy.defaultStrategy match {
case oneForOne: OneForOneStrategy => oneForOne.withMaxNrOfRetries(maxNrOfRetries)
case s => s
@@ -99,11 +102,12 @@ object BackoffSupervisor {
* In order to skip this additional delay pass in `0`.
*/
@deprecated("Use props with BackoffOpts instead", since = "2.5.22")
- def props(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double): Props = {
+ def props(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double): Props = {
props(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor)
}
@@ -128,12 +132,13 @@ object BackoffSupervisor {
* In order to restart infinitely pass in `-1`.
*/
@deprecated("Use props with BackoffOpts instead", since = "2.5.22")
- def props(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxNrOfRetries: Int): Props = {
+ def props(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxNrOfRetries: Int): Props = {
props(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, maxNrOfRetries)
}
@@ -159,25 +164,27 @@ object BackoffSupervisor {
* backoff process, only a [[OneForOneStrategy]] makes sense here.
*/
@deprecated("Use props with BackoffOpts instead", since = "2.5.22")
- def propsWithSupervisorStrategy(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- strategy: SupervisorStrategy): Props = {
+ def propsWithSupervisorStrategy(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ strategy: SupervisorStrategy): Props = {
require(minBackoff > Duration.Zero, "minBackoff must be > 0")
require(maxBackoff >= minBackoff, "maxBackoff must be >= minBackoff")
require(0.0 <= randomFactor && randomFactor <= 1.0, "randomFactor must be between 0.0 and 1.0")
Props(
- new BackoffOnStopSupervisor(childProps,
- childName,
- minBackoff,
- maxBackoff,
- AutoReset(minBackoff),
- randomFactor,
- strategy,
- None,
- None))
+ new BackoffOnStopSupervisor(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ AutoReset(minBackoff),
+ randomFactor,
+ strategy,
+ None,
+ None))
}
/**
@@ -202,12 +209,13 @@ object BackoffSupervisor {
* backoff process, only a [[OneForOneStrategy]] makes sense here.
*/
@deprecated("Use props with BackoffOpts instead", since = "2.5.22")
- def propsWithSupervisorStrategy(childProps: Props,
- childName: String,
- minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- strategy: SupervisorStrategy): Props = {
+ def propsWithSupervisorStrategy(
+ childProps: Props,
+ childName: String,
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ strategy: SupervisorStrategy): Props = {
propsWithSupervisorStrategy(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, strategy)
}
@@ -294,10 +302,11 @@ object BackoffSupervisor {
*
* Calculates an exponential back off delay.
*/
- private[akka] def calculateDelay(restartCount: Int,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double): FiniteDuration = {
+ private[akka] def calculateDelay(
+ restartCount: Int,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double): FiniteDuration = {
val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor
val calculatedDuration = Try(maxBackoff.min(minBackoff * math.pow(2, restartCount)) * rnd).getOrElse(maxBackoff)
calculatedDuration match {
@@ -309,58 +318,64 @@ object BackoffSupervisor {
// for backwards compability
@deprecated("Use `BackoffSupervisor.props` method instead", since = "2.5.22")
-final class BackoffSupervisor(override val childProps: Props,
- override val childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- override val reset: BackoffReset,
- randomFactor: Double,
- strategy: SupervisorStrategy,
- val replyWhileStopped: Option[Any],
- val finalStopMessage: Option[Any => Boolean])
- extends BackoffOnStopSupervisor(childProps,
- childName,
- minBackoff,
- maxBackoff,
- reset,
- randomFactor,
- strategy,
- replyWhileStopped,
- finalStopMessage) {
+final class BackoffSupervisor(
+ override val childProps: Props,
+ override val childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ override val reset: BackoffReset,
+ randomFactor: Double,
+ strategy: SupervisorStrategy,
+ val replyWhileStopped: Option[Any],
+ val finalStopMessage: Option[Any => Boolean])
+ extends BackoffOnStopSupervisor(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ reset,
+ randomFactor,
+ strategy,
+ replyWhileStopped,
+ finalStopMessage) {
// for binary compatibility with 2.5.18
- def this(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- reset: BackoffReset,
- randomFactor: Double,
- strategy: SupervisorStrategy,
- replyWhileStopped: Option[Any]) =
+ def this(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ reset: BackoffReset,
+ randomFactor: Double,
+ strategy: SupervisorStrategy,
+ replyWhileStopped: Option[Any]) =
this(childProps, childName, minBackoff, maxBackoff, reset, randomFactor, strategy, replyWhileStopped, None)
// for binary compatibility with 2.4.1
- def this(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- supervisorStrategy: SupervisorStrategy) =
- this(childProps,
- childName,
- minBackoff,
- maxBackoff,
- AutoReset(minBackoff),
- randomFactor,
- supervisorStrategy,
- None,
- None)
+ def this(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ supervisorStrategy: SupervisorStrategy) =
+ this(
+ childProps,
+ childName,
+ minBackoff,
+ maxBackoff,
+ AutoReset(minBackoff),
+ randomFactor,
+ supervisorStrategy,
+ None,
+ None)
// for binary compatibility with 2.4.0
- def this(childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double) =
+ def this(
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double) =
this(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy)
}
diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala
index eb5fb23852..8899c30073 100644
--- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala
+++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala
@@ -43,10 +43,11 @@ object CircuitBreaker {
* @param callTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to consider a call a failure
* @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit
*/
- def apply(scheduler: Scheduler,
- maxFailures: Int,
- callTimeout: FiniteDuration,
- resetTimeout: FiniteDuration): CircuitBreaker =
+ def apply(
+ scheduler: Scheduler,
+ maxFailures: Int,
+ callTimeout: FiniteDuration,
+ resetTimeout: FiniteDuration): CircuitBreaker =
new CircuitBreaker(scheduler, maxFailures, callTimeout, resetTimeout)(sameThreadExecutionContext)
/**
@@ -62,10 +63,11 @@ object CircuitBreaker {
* @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit
*/
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def create(scheduler: Scheduler,
- maxFailures: Int,
- callTimeout: FiniteDuration,
- resetTimeout: FiniteDuration): CircuitBreaker =
+ def create(
+ scheduler: Scheduler,
+ maxFailures: Int,
+ callTimeout: FiniteDuration,
+ resetTimeout: FiniteDuration): CircuitBreaker =
apply(scheduler, maxFailures, callTimeout, resetTimeout)
/**
@@ -80,10 +82,11 @@ object CircuitBreaker {
* @param callTimeout [[java.time.Duration]] of time after which to consider a call a failure
* @param resetTimeout [[java.time.Duration]] of time after which to attempt to close the circuit
*/
- def create(scheduler: Scheduler,
- maxFailures: Int,
- callTimeout: java.time.Duration,
- resetTimeout: java.time.Duration): CircuitBreaker =
+ def create(
+ scheduler: Scheduler,
+ maxFailures: Int,
+ callTimeout: java.time.Duration,
+ resetTimeout: java.time.Duration): CircuitBreaker =
apply(scheduler, maxFailures, callTimeout.asScala, resetTimeout.asScala)
private val exceptionAsFailure: Try[_] => Boolean = {
@@ -130,30 +133,33 @@ object CircuitBreaker {
* @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit
* @param executor [[scala.concurrent.ExecutionContext]] used for execution of state transition listeners
*/
-class CircuitBreaker(scheduler: Scheduler,
- maxFailures: Int,
- callTimeout: FiniteDuration,
- val resetTimeout: FiniteDuration,
- maxResetTimeout: FiniteDuration,
- exponentialBackoffFactor: Double)(implicit executor: ExecutionContext)
+class CircuitBreaker(
+ scheduler: Scheduler,
+ maxFailures: Int,
+ callTimeout: FiniteDuration,
+ val resetTimeout: FiniteDuration,
+ maxResetTimeout: FiniteDuration,
+ exponentialBackoffFactor: Double)(implicit executor: ExecutionContext)
extends AbstractCircuitBreaker {
require(exponentialBackoffFactor >= 1.0, "factor must be >= 1.0")
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def this(executor: ExecutionContext,
- scheduler: Scheduler,
- maxFailures: Int,
- callTimeout: FiniteDuration,
- resetTimeout: FiniteDuration) = {
+ def this(
+ executor: ExecutionContext,
+ scheduler: Scheduler,
+ maxFailures: Int,
+ callTimeout: FiniteDuration,
+ resetTimeout: FiniteDuration) = {
this(scheduler, maxFailures, callTimeout, resetTimeout, 36500.days, 1.0)(executor)
}
- def this(executor: ExecutionContext,
- scheduler: Scheduler,
- maxFailures: Int,
- callTimeout: java.time.Duration,
- resetTimeout: java.time.Duration) = {
+ def this(
+ executor: ExecutionContext,
+ scheduler: Scheduler,
+ maxFailures: Int,
+ callTimeout: java.time.Duration,
+ resetTimeout: java.time.Duration) = {
this(scheduler, maxFailures, callTimeout.asScala, resetTimeout.asScala, 36500.days, 1.0)(executor)
}
@@ -220,10 +226,11 @@ class CircuitBreaker(scheduler: Scheduler,
*/
@inline
private[this] def swapResetTimeout(oldResetTimeout: FiniteDuration, newResetTimeout: FiniteDuration): Boolean =
- Unsafe.instance.compareAndSwapObject(this,
- AbstractCircuitBreaker.resetTimeoutOffset,
- oldResetTimeout,
- newResetTimeout)
+ Unsafe.instance.compareAndSwapObject(
+ this,
+ AbstractCircuitBreaker.resetTimeoutOffset,
+ oldResetTimeout,
+ newResetTimeout)
/**
* Helper method for accessing to the underlying resetTimeout via Unsafe
@@ -334,10 +341,12 @@ class CircuitBreaker(scheduler: Scheduler,
* @return The result of the call
*/
def withSyncCircuitBreaker[T](body: => T, defineFailureFn: Try[T] => Boolean): T =
- Await.result(withCircuitBreaker(try Future.successful(body)
- catch { case NonFatal(t) => Future.failed(t) },
- defineFailureFn),
- callTimeout)
+ Await.result(
+ withCircuitBreaker(
+ try Future.successful(body)
+ catch { case NonFatal(t) => Future.failed(t) },
+ defineFailureFn),
+ callTimeout)
/**
* Java API for [[#withSyncCircuitBreaker]]. Throws [[java.util.concurrent.TimeoutException]] if the call timed out.
@@ -1028,7 +1037,8 @@ class CircuitBreaker(scheduler: Scheduler,
* currently in half-open state.
* @param message Defaults to "Circuit Breaker is open; calls are failing fast"
*/
-class CircuitBreakerOpenException(val remainingDuration: FiniteDuration,
- message: String = "Circuit Breaker is open; calls are failing fast")
+class CircuitBreakerOpenException(
+ val remainingDuration: FiniteDuration,
+ message: String = "Circuit Breaker is open; calls are failing fast")
extends AkkaException(message)
with NoStackTrace
diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala
index 7f36fd5feb..efc65c71b7 100644
--- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala
+++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala
@@ -123,9 +123,10 @@ object Patterns {
* @param messageFactory function taking an actor ref and returning the message to be sent
* @param timeout the timeout for the response before failing the returned completion stage
*/
- def askWithReplyTo(actor: ActorRef,
- messageFactory: japi.function.Function[ActorRef, Any],
- timeout: java.time.Duration): CompletionStage[AnyRef] =
+ def askWithReplyTo(
+ actor: ActorRef,
+ messageFactory: japi.function.Function[ActorRef, Any],
+ timeout: java.time.Duration): CompletionStage[AnyRef] =
extended.ask(actor, messageFactory.apply _)(Timeout.create(timeout)).toJava.asInstanceOf[CompletionStage[AnyRef]]
/**
@@ -173,9 +174,10 @@ object Patterns {
* timeout);
* }}}
*/
- def askWithReplyTo(actor: ActorRef,
- messageFactory: japi.Function[ActorRef, Any],
- timeoutMillis: Long): Future[AnyRef] =
+ def askWithReplyTo(
+ actor: ActorRef,
+ messageFactory: japi.Function[ActorRef, Any],
+ timeoutMillis: Long): Future[AnyRef] =
extended.ask(actor, messageFactory.apply _)(Timeout(timeoutMillis.millis)).asInstanceOf[Future[AnyRef]]
/**
@@ -287,9 +289,10 @@ object Patterns {
* timeout);
* }}}
*/
- def askWithReplyTo(selection: ActorSelection,
- messageFactory: japi.Function[ActorRef, Any],
- timeoutMillis: Long): Future[AnyRef] =
+ def askWithReplyTo(
+ selection: ActorSelection,
+ messageFactory: japi.Function[ActorRef, Any],
+ timeoutMillis: Long): Future[AnyRef] =
extended.ask(selection, messageFactory.apply _)(Timeout(timeoutMillis.millis)).asInstanceOf[Future[AnyRef]]
/**
@@ -303,9 +306,10 @@ object Patterns {
* timeout);
* }}}
*/
- def askWithReplyTo(selection: ActorSelection,
- messageFactory: japi.Function[ActorRef, Any],
- timeout: java.time.Duration): CompletionStage[AnyRef] =
+ def askWithReplyTo(
+ selection: ActorSelection,
+ messageFactory: japi.Function[ActorRef, Any],
+ timeout: java.time.Duration): CompletionStage[AnyRef] =
extended.ask(selection, messageFactory.apply _)(timeout.asScala).toJava.asInstanceOf[CompletionStage[AnyRef]]
/**
@@ -402,29 +406,32 @@ object Patterns {
* If the target actor isn't terminated within the timeout the [[java.util.concurrent.CompletionStage]]
* is completed with failure [[akka.pattern.AskTimeoutException]].
*/
- def gracefulStop(target: ActorRef,
- timeout: java.time.Duration,
- stopMessage: Any): CompletionStage[java.lang.Boolean] =
+ def gracefulStop(
+ target: ActorRef,
+ timeout: java.time.Duration,
+ stopMessage: Any): CompletionStage[java.lang.Boolean] =
scalaGracefulStop(target, timeout.asScala, stopMessage).toJava.asInstanceOf[CompletionStage[java.lang.Boolean]]
/**
* Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided Callable
* after the specified duration.
*/
- def after[T](duration: FiniteDuration,
- scheduler: Scheduler,
- context: ExecutionContext,
- value: Callable[Future[T]]): Future[T] =
+ def after[T](
+ duration: FiniteDuration,
+ scheduler: Scheduler,
+ context: ExecutionContext,
+ value: Callable[Future[T]]): Future[T] =
scalaAfter(duration, scheduler)(value.call())(context)
/**
* Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided Callable
* after the specified duration.
*/
- def after[T](duration: java.time.Duration,
- scheduler: Scheduler,
- context: ExecutionContext,
- value: Callable[CompletionStage[T]]): CompletionStage[T] =
+ def after[T](
+ duration: java.time.Duration,
+ scheduler: Scheduler,
+ context: ExecutionContext,
+ value: Callable[CompletionStage[T]]): CompletionStage[T] =
afterCompletionStage(duration.asScala, scheduler)(value.call())(context)
/**
@@ -440,10 +447,11 @@ object Patterns {
* after the specified duration.
*/
@deprecated("Use the overloaded one which accepts a Callable of CompletionStage instead.", since = "2.5.22")
- def after[T](duration: java.time.Duration,
- scheduler: Scheduler,
- context: ExecutionContext,
- value: CompletionStage[T]): CompletionStage[T] =
+ def after[T](
+ duration: java.time.Duration,
+ scheduler: Scheduler,
+ context: ExecutionContext,
+ value: CompletionStage[T]): CompletionStage[T] =
afterCompletionStage(duration.asScala, scheduler)(value)(context)
/**
@@ -454,11 +462,12 @@ object Patterns {
* Note that the attempt function will be invoked on the given execution context for subsequent tries and
* therefore must be thread safe (not touch unsafe mutable state).
*/
- def retry[T](attempt: Callable[Future[T]],
- attempts: Int,
- delay: FiniteDuration,
- scheduler: Scheduler,
- context: ExecutionContext): Future[T] =
+ def retry[T](
+ attempt: Callable[Future[T]],
+ attempts: Int,
+ delay: FiniteDuration,
+ scheduler: Scheduler,
+ context: ExecutionContext): Future[T] =
scalaRetry(() => attempt.call, attempts, delay)(context, scheduler)
/**
@@ -469,11 +478,12 @@ object Patterns {
* Note that the attempt function will be invoked on the given execution context for subsequent tries
* and therefore must be thread safe (not touch unsafe mutable state).
*/
- def retry[T](attempt: Callable[CompletionStage[T]],
- attempts: Int,
- delay: java.time.Duration,
- scheduler: Scheduler,
- ec: ExecutionContext): CompletionStage[T] =
+ def retry[T](
+ attempt: Callable[CompletionStage[T]],
+ attempts: Int,
+ delay: java.time.Duration,
+ scheduler: Scheduler,
+ ec: ExecutionContext): CompletionStage[T] =
scalaRetry(() => attempt.call().toScala, attempts, delay.asScala)(ec, scheduler).toJava
}
@@ -569,9 +579,10 @@ object PatternsCS {
* @param timeout the timeout for the response before failing the returned completion operator
*/
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.15")
- def askWithReplyTo(actor: ActorRef,
- messageFactory: japi.function.Function[ActorRef, Any],
- timeout: Timeout): CompletionStage[AnyRef] =
+ def askWithReplyTo(
+ actor: ActorRef,
+ messageFactory: japi.function.Function[ActorRef, Any],
+ timeout: Timeout): CompletionStage[AnyRef] =
extended.ask(actor, messageFactory.apply _)(timeout).toJava.asInstanceOf[CompletionStage[AnyRef]]
/**
@@ -590,9 +601,10 @@ object PatternsCS {
* @param timeout the timeout for the response before failing the returned completion stage
*/
@deprecated("Use Pattens.askWithReplyTo instead.", since = "2.5.19")
- def askWithReplyTo(actor: ActorRef,
- messageFactory: japi.function.Function[ActorRef, Any],
- timeout: java.time.Duration): CompletionStage[AnyRef] =
+ def askWithReplyTo(
+ actor: ActorRef,
+ messageFactory: japi.function.Function[ActorRef, Any],
+ timeout: java.time.Duration): CompletionStage[AnyRef] =
extended.ask(actor, messageFactory.apply _)(Timeout.create(timeout)).toJava.asInstanceOf[CompletionStage[AnyRef]]
/**
@@ -643,9 +655,10 @@ object PatternsCS {
* @param timeoutMillis the timeout for the response before failing the returned completion operator
*/
@deprecated("Use Pattens.askWithReplyTo which accepts java.time.Duration instead.", since = "2.5.19")
- def askWithReplyTo(actor: ActorRef,
- messageFactory: japi.function.Function[ActorRef, Any],
- timeoutMillis: Long): CompletionStage[AnyRef] =
+ def askWithReplyTo(
+ actor: ActorRef,
+ messageFactory: japi.function.Function[ActorRef, Any],
+ timeoutMillis: Long): CompletionStage[AnyRef] =
askWithReplyTo(actor, messageFactory, Timeout(timeoutMillis.millis))
/**
@@ -754,9 +767,10 @@ object PatternsCS {
* }}}
*/
@deprecated("Use Pattens.askWithReplyTo which accepts java.time.Duration instead.", since = "2.5.19")
- def askWithReplyTo(selection: ActorSelection,
- messageFactory: japi.Function[ActorRef, Any],
- timeoutMillis: Long): CompletionStage[AnyRef] =
+ def askWithReplyTo(
+ selection: ActorSelection,
+ messageFactory: japi.Function[ActorRef, Any],
+ timeoutMillis: Long): CompletionStage[AnyRef] =
extended
.ask(selection, messageFactory.apply _)(Timeout(timeoutMillis.millis))
.toJava
@@ -842,9 +856,10 @@ object PatternsCS {
* is completed with failure [[akka.pattern.AskTimeoutException]].
*/
@deprecated("Use Patterns.gracefulStop instead.", since = "2.5.19")
- def gracefulStop(target: ActorRef,
- timeout: java.time.Duration,
- stopMessage: Any): CompletionStage[java.lang.Boolean] =
+ def gracefulStop(
+ target: ActorRef,
+ timeout: java.time.Duration,
+ stopMessage: Any): CompletionStage[java.lang.Boolean] =
scalaGracefulStop(target, timeout.asScala, stopMessage).toJava.asInstanceOf[CompletionStage[java.lang.Boolean]]
/**
@@ -852,10 +867,11 @@ object PatternsCS {
* after the specified duration.
*/
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def after[T](duration: FiniteDuration,
- scheduler: Scheduler,
- context: ExecutionContext,
- value: Callable[CompletionStage[T]]): CompletionStage[T] =
+ def after[T](
+ duration: FiniteDuration,
+ scheduler: Scheduler,
+ context: ExecutionContext,
+ value: Callable[CompletionStage[T]]): CompletionStage[T] =
afterCompletionStage(duration, scheduler)(value.call())(context)
/**
@@ -863,34 +879,39 @@ object PatternsCS {
* after the specified duration.
*/
@deprecated("Use Patterns.after instead.", since = "2.5.19")
- def after[T](duration: java.time.Duration,
- scheduler: Scheduler,
- context: ExecutionContext,
- value: Callable[CompletionStage[T]]): CompletionStage[T] =
+ def after[T](
+ duration: java.time.Duration,
+ scheduler: Scheduler,
+ context: ExecutionContext,
+ value: Callable[CompletionStage[T]]): CompletionStage[T] =
afterCompletionStage(duration.asScala, scheduler)(value.call())(context)
/**
* Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided value
* after the specified duration.
*/
- @deprecated("Use Patterns.after which accepts java.time.Duration and Callable of CompletionStage instead.",
- since = "2.5.22")
- def after[T](duration: FiniteDuration,
- scheduler: Scheduler,
- context: ExecutionContext,
- value: CompletionStage[T]): CompletionStage[T] =
+ @deprecated(
+ "Use Patterns.after which accepts java.time.Duration and Callable of CompletionStage instead.",
+ since = "2.5.22")
+ def after[T](
+ duration: FiniteDuration,
+ scheduler: Scheduler,
+ context: ExecutionContext,
+ value: CompletionStage[T]): CompletionStage[T] =
afterCompletionStage(duration, scheduler)(value)(context)
/**
* Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided value
* after the specified duration.
*/
- @deprecated("Use Patterns.after which accepts java.time.Duration and Callable of CompletionStage instead.",
- since = "2.5.22")
- def after[T](duration: java.time.Duration,
- scheduler: Scheduler,
- context: ExecutionContext,
- value: CompletionStage[T]): CompletionStage[T] =
+ @deprecated(
+ "Use Patterns.after which accepts java.time.Duration and Callable of CompletionStage instead.",
+ since = "2.5.22")
+ def after[T](
+ duration: java.time.Duration,
+ scheduler: Scheduler,
+ context: ExecutionContext,
+ value: CompletionStage[T]): CompletionStage[T] =
afterCompletionStage(duration.asScala, scheduler)(value)(context)
/**
@@ -902,10 +923,11 @@ object PatternsCS {
* and therefore must be thread safe (not touch unsafe mutable state).
*/
@deprecated("Use Patterns.retry instead.", since = "2.5.19")
- def retry[T](attempt: Callable[CompletionStage[T]],
- attempts: Int,
- delay: java.time.Duration,
- scheduler: Scheduler,
- ec: ExecutionContext): CompletionStage[T] =
+ def retry[T](
+ attempt: Callable[CompletionStage[T]],
+ attempts: Int,
+ delay: java.time.Duration,
+ scheduler: Scheduler,
+ ec: ExecutionContext): CompletionStage[T] =
scalaRetry(() => attempt.call().toScala, attempts, delay.asScala)(ec, scheduler).toJava
}
diff --git a/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala b/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala
index 90f92e91af..6cf636edb8 100644
--- a/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala
+++ b/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala
@@ -35,8 +35,9 @@ trait RetrySupport {
* )
* }}}
*/
- def retry[T](attempt: () => Future[T], attempts: Int, delay: FiniteDuration)(implicit ec: ExecutionContext,
- scheduler: Scheduler): Future[T] = {
+ def retry[T](attempt: () => Future[T], attempts: Int, delay: FiniteDuration)(
+ implicit ec: ExecutionContext,
+ scheduler: Scheduler): Future[T] = {
try {
if (attempts > 0) {
attempt().recoverWith {
diff --git a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala
index 45c913648a..5f5c77bd68 100644
--- a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala
+++ b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala
@@ -18,14 +18,15 @@ import scala.concurrent.duration._
* This back-off supervisor is created by using ``akka.pattern.BackoffSupervisor.props``
* with ``akka.pattern.BackoffOpts.onFailure``.
*/
-@InternalApi private[pattern] class BackoffOnRestartSupervisor(val childProps: Props,
- val childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- val reset: BackoffReset,
- randomFactor: Double,
- strategy: OneForOneStrategy,
- replyWhileStopped: Option[Any])
+@InternalApi private[pattern] class BackoffOnRestartSupervisor(
+ val childProps: Props,
+ val childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ val reset: BackoffReset,
+ randomFactor: Double,
+ strategy: OneForOneStrategy,
+ replyWhileStopped: Option[Any])
extends Actor
with HandleBackoff
with ActorLogging {
@@ -55,9 +56,10 @@ import scala.concurrent.duration._
val nextRestartCount = restartCount + 1
if (strategy.maxNrOfRetries >= 0 && nextRestartCount > strategy.maxNrOfRetries) {
// If we've exceeded the maximum # of retries allowed by the Strategy, die.
- log.debug(s"Terminating on restart #{} which exceeds max allowed restarts ({})",
- nextRestartCount,
- strategy.maxNrOfRetries)
+ log.debug(
+ s"Terminating on restart #{} which exceeds max allowed restarts ({})",
+ nextRestartCount,
+ strategy.maxNrOfRetries)
become(receive)
stop(self)
} else {
diff --git a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala
index 8180d7f630..4a2ee66faa 100644
--- a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala
+++ b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala
@@ -18,15 +18,16 @@ import scala.concurrent.duration.FiniteDuration
* This back-off supervisor is created by using `akka.pattern.BackoffSupervisor.props`
* with `BackoffOpts.onStop`.
*/
-@InternalApi private[pattern] class BackoffOnStopSupervisor(val childProps: Props,
- val childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- val reset: BackoffReset,
- randomFactor: Double,
- strategy: SupervisorStrategy,
- replyWhileStopped: Option[Any],
- finalStopMessage: Option[Any => Boolean])
+@InternalApi private[pattern] class BackoffOnStopSupervisor(
+ val childProps: Props,
+ val childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ val reset: BackoffReset,
+ randomFactor: Double,
+ strategy: SupervisorStrategy,
+ replyWhileStopped: Option[Any],
+ finalStopMessage: Option[Any => Boolean])
extends Actor
with HandleBackoff
with ActorLogging {
@@ -64,9 +65,10 @@ import scala.concurrent.duration.FiniteDuration
context.system.scheduler.scheduleOnce(restartDelay, self, StartChild)
restartCount = nextRestartCount
} else {
- log.debug(s"Terminating on restart #{} which exceeds max allowed restarts ({})",
- nextRestartCount,
- maxNrOfRetries)
+ log.debug(
+ s"Terminating on restart #{} which exceeds max allowed restarts ({})",
+ nextRestartCount,
+ maxNrOfRetries)
context.stop(self)
}
}
diff --git a/akka-actor/src/main/scala/akka/routing/Balancing.scala b/akka-actor/src/main/scala/akka/routing/Balancing.scala
index ba16268114..1a933af627 100644
--- a/akka-actor/src/main/scala/akka/routing/Balancing.scala
+++ b/akka-actor/src/main/scala/akka/routing/Balancing.scala
@@ -66,9 +66,10 @@ private[akka] final class BalancingRoutingLogic extends RoutingLogic {
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
-final case class BalancingPool(val nrOfInstances: Int,
- override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+final case class BalancingPool(
+ val nrOfInstances: Int,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Pool {
def this(config: Config) =
@@ -112,14 +113,16 @@ final case class BalancingPool(val nrOfInstances: Int,
// dispatcher of this pool
val deployDispatcherConfigPath = s"akka.actor.deployment.$deployPath.pool-dispatcher"
val systemConfig = context.system.settings.config
- val dispatcherConfig = context.system.dispatchers.config(dispatcherId,
- // use the user defined 'pool-dispatcher' config as fallback, if any
- if (systemConfig.hasPath(deployDispatcherConfigPath))
- systemConfig.getConfig(deployDispatcherConfigPath)
- else ConfigFactory.empty)
+ val dispatcherConfig = context.system.dispatchers.config(
+ dispatcherId,
+ // use the user defined 'pool-dispatcher' config as fallback, if any
+ if (systemConfig.hasPath(deployDispatcherConfigPath))
+ systemConfig.getConfig(deployDispatcherConfigPath)
+ else ConfigFactory.empty)
- dispatchers.registerConfigurator(dispatcherId,
- new BalancingDispatcherConfigurator(dispatcherConfig, dispatchers.prerequisites))
+ dispatchers.registerConfigurator(
+ dispatcherId,
+ new BalancingDispatcherConfigurator(dispatcherConfig, dispatchers.prerequisites))
}
val routeePropsWithDispatcher = routeeProps.withDispatcher(dispatcherId)
diff --git a/akka-actor/src/main/scala/akka/routing/Broadcast.scala b/akka-actor/src/main/scala/akka/routing/Broadcast.scala
index 125167550c..891c06bb7a 100644
--- a/akka-actor/src/main/scala/akka/routing/Broadcast.scala
+++ b/akka-actor/src/main/scala/akka/routing/Broadcast.scala
@@ -56,18 +56,20 @@ final class BroadcastRoutingLogic extends RoutingLogic {
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
-final case class BroadcastPool(val nrOfInstances: Int,
- override val resizer: Option[Resizer] = None,
- override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+final case class BroadcastPool(
+ val nrOfInstances: Int,
+ override val resizer: Option[Resizer] = None,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool
with PoolOverrideUnsetConfig[BroadcastPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
- resizer = Resizer.fromConfig(config),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
+ resizer = Resizer.fromConfig(config),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
@@ -118,8 +120,9 @@ final case class BroadcastPool(val nrOfInstances: Int,
* router management messages
*/
@SerialVersionUID(1L)
-final case class BroadcastGroup(val paths: immutable.Iterable[String],
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+final case class BroadcastGroup(
+ val paths: immutable.Iterable[String],
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
index 53c156472b..e71f8b3531 100644
--- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
+++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
@@ -112,13 +112,14 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
object ConsistentHash {
def apply[T: ClassTag](nodes: Iterable[T], virtualNodesFactor: Int): ConsistentHash[T] = {
- new ConsistentHash(immutable.SortedMap.empty[Int, T] ++
- (for {
- node <- nodes
- nodeHash = hashFor(node.toString)
- vnode <- 1 to virtualNodesFactor
- } yield (concatenateNodeHash(nodeHash, vnode) -> node)),
- virtualNodesFactor)
+ new ConsistentHash(
+ immutable.SortedMap.empty[Int, T] ++
+ (for {
+ node <- nodes
+ nodeHash = hashFor(node.toString)
+ vnode <- 1 to virtualNodesFactor
+ } yield (concatenateNodeHash(nodeHash, vnode) -> node)),
+ virtualNodesFactor)
}
/**
diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
index 94cc20bf80..ae6d30272c 100644
--- a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
+++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
@@ -137,10 +137,10 @@ object ConsistentHashingRoutingLogic {
*
*/
@SerialVersionUID(1L)
-final case class ConsistentHashingRoutingLogic(system: ActorSystem,
- virtualNodesFactor: Int = 0,
- hashMapping: ConsistentHashingRouter.ConsistentHashMapping =
- ConsistentHashingRouter.emptyConsistentHashMapping)
+final case class ConsistentHashingRoutingLogic(
+ system: ActorSystem,
+ virtualNodesFactor: Int = 0,
+ hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
extends RoutingLogic {
import ConsistentHashingRouter._
@@ -225,10 +225,11 @@ final case class ConsistentHashingRoutingLogic(system: ActorSystem,
case _ if hashMapping.isDefinedAt(message) => target(hashMapping(message))
case hashable: ConsistentHashable => target(hashable.consistentHashKey)
case _ =>
- log.warning("Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
- message.getClass.getName,
- classOf[ConsistentHashable].getName,
- classOf[ConsistentHashableEnvelope].getName)
+ log.warning(
+ "Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
+ message.getClass.getName,
+ classOf[ConsistentHashable].getName,
+ classOf[ConsistentHashableEnvelope].getName)
NoRoutee
}
}
@@ -284,9 +285,10 @@ final case class ConsistentHashingPool(
with PoolOverrideUnsetConfig[ConsistentHashingPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
- resizer = Resizer.fromConfig(config),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
+ resizer = Resizer.fromConfig(config),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
@@ -360,11 +362,11 @@ final case class ConsistentHashingPool(
* router management messages
*/
@SerialVersionUID(1L)
-final case class ConsistentHashingGroup(val paths: immutable.Iterable[String],
- val virtualNodesFactor: Int = 0,
- val hashMapping: ConsistentHashingRouter.ConsistentHashMapping =
- ConsistentHashingRouter.emptyConsistentHashMapping,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+final case class ConsistentHashingGroup(
+ val paths: immutable.Iterable[String],
+ val virtualNodesFactor: Int = 0,
+ val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
index a4a23e0295..940321dbfa 100644
--- a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
+++ b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
@@ -44,10 +44,11 @@ case object OptimalSizeExploringResizer {
/**
* INTERNAL API
*/
- private[routing] case class ResizeRecord(underutilizationStreak: Option[UnderUtilizationStreak] = None,
- messageCount: Long = 0,
- totalQueueLength: Int = 0,
- checkTime: Long = 0)
+ private[routing] case class ResizeRecord(
+ underutilizationStreak: Option[UnderUtilizationStreak] = None,
+ messageCount: Long = 0,
+ totalQueueLength: Int = 0,
+ checkTime: Long = 0)
/**
* INTERNAL API
@@ -55,19 +56,17 @@ case object OptimalSizeExploringResizer {
private[routing] type PerformanceLog = Map[PoolSize, Duration]
def apply(resizerCfg: Config): OptimalSizeExploringResizer =
- DefaultOptimalSizeExploringResizer(lowerBound = resizerCfg.getInt("lower-bound"),
- upperBound = resizerCfg.getInt("upper-bound"),
- chanceOfScalingDownWhenFull =
- resizerCfg.getDouble("chance-of-ramping-down-when-full"),
- actionInterval = resizerCfg.getDuration("action-interval").asScala,
- downsizeAfterUnderutilizedFor =
- resizerCfg.getDuration("downsize-after-underutilized-for").asScala,
- numOfAdjacentSizesToConsiderDuringOptimization =
- resizerCfg.getInt("optimization-range"),
- exploreStepSize = resizerCfg.getDouble("explore-step-size"),
- explorationProbability = resizerCfg.getDouble("chance-of-exploration"),
- weightOfLatestMetric = resizerCfg.getDouble("weight-of-latest-metric"),
- downsizeRatio = resizerCfg.getDouble("downsize-ratio"))
+ DefaultOptimalSizeExploringResizer(
+ lowerBound = resizerCfg.getInt("lower-bound"),
+ upperBound = resizerCfg.getInt("upper-bound"),
+ chanceOfScalingDownWhenFull = resizerCfg.getDouble("chance-of-ramping-down-when-full"),
+ actionInterval = resizerCfg.getDuration("action-interval").asScala,
+ downsizeAfterUnderutilizedFor = resizerCfg.getDuration("downsize-after-underutilized-for").asScala,
+ numOfAdjacentSizesToConsiderDuringOptimization = resizerCfg.getInt("optimization-range"),
+ exploreStepSize = resizerCfg.getDouble("explore-step-size"),
+ explorationProbability = resizerCfg.getDouble("chance-of-exploration"),
+ weightOfLatestMetric = resizerCfg.getDouble("weight-of-latest-metric"),
+ downsizeRatio = resizerCfg.getDouble("downsize-ratio"))
}
@@ -117,16 +116,17 @@ case object OptimalSizeExploringResizer {
*
*/
@SerialVersionUID(1L)
-case class DefaultOptimalSizeExploringResizer(lowerBound: PoolSize = 1,
- upperBound: PoolSize = 30,
- chanceOfScalingDownWhenFull: Double = 0.2,
- actionInterval: Duration = 5.seconds,
- numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
- exploreStepSize: Double = 0.1,
- downsizeRatio: Double = 0.8,
- downsizeAfterUnderutilizedFor: Duration = 72.hours,
- explorationProbability: Double = 0.4,
- weightOfLatestMetric: Double = 0.5)
+case class DefaultOptimalSizeExploringResizer(
+ lowerBound: PoolSize = 1,
+ upperBound: PoolSize = 30,
+ chanceOfScalingDownWhenFull: Double = 0.2,
+ actionInterval: Duration = 5.seconds,
+ numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
+ exploreStepSize: Double = 0.1,
+ downsizeRatio: Double = 0.8,
+ downsizeAfterUnderutilizedFor: Duration = 72.hours,
+ explorationProbability: Double = 0.4,
+ weightOfLatestMetric: Double = 0.5)
extends OptimalSizeExploringResizer {
/**
@@ -172,12 +172,14 @@ case class DefaultOptimalSizeExploringResizer(lowerBound: PoolSize = 1,
throw new IllegalArgumentException(
"upperBound must be >= lowerBound, was: [%s] < [%s]".format(upperBound, lowerBound))
- checkParamLowerBound(numOfAdjacentSizesToConsiderDuringOptimization,
- 2,
- "numOfAdjacentSizesToConsiderDuringOptimization")
+ checkParamLowerBound(
+ numOfAdjacentSizesToConsiderDuringOptimization,
+ 2,
+ "numOfAdjacentSizesToConsiderDuringOptimization")
checkParamAsProbability(chanceOfScalingDownWhenFull, "chanceOfScalingDownWhenFull")
- checkParamAsPositiveNum(numOfAdjacentSizesToConsiderDuringOptimization,
- "numOfAdjacentSizesToConsiderDuringOptimization")
+ checkParamAsPositiveNum(
+ numOfAdjacentSizesToConsiderDuringOptimization,
+ "numOfAdjacentSizesToConsiderDuringOptimization")
checkParamAsPositiveNum(exploreStepSize, "exploreStepSize")
checkParamAsPositiveNum(downsizeRatio, "downsizeRatio")
checkParamAsProbability(explorationProbability, "explorationProbability")
@@ -196,8 +198,9 @@ case class DefaultOptimalSizeExploringResizer(lowerBound: PoolSize = 1,
record = newRecord
}
- private[routing] def updatedStats(currentRoutees: immutable.IndexedSeq[Routee],
- messageCounter: Long): (PerformanceLog, ResizeRecord) = {
+ private[routing] def updatedStats(
+ currentRoutees: immutable.IndexedSeq[Routee],
+ messageCounter: Long): (PerformanceLog, ResizeRecord) = {
val now = LocalDateTime.now
val currentSize = currentRoutees.length
@@ -221,8 +224,9 @@ case class DefaultOptimalSizeExploringResizer(lowerBound: PoolSize = 1,
None
else
Some(
- UnderUtilizationStreak(record.underutilizationStreak.fold(now)(_.start),
- Math.max(record.underutilizationStreak.fold(0)(_.highestUtilization), utilized)))
+ UnderUtilizationStreak(
+ record.underutilizationStreak.fold(now)(_.start),
+ Math.max(record.underutilizationStreak.fold(0)(_.highestUtilization), utilized)))
val newPerformanceLog: PerformanceLog =
if (fullyUtilized && record.underutilizationStreak.isEmpty && record.checkTime > 0) {
@@ -240,10 +244,11 @@ case class DefaultOptimalSizeExploringResizer(lowerBound: PoolSize = 1,
} else performanceLog
} else performanceLog
- val newRecord = record.copy(underutilizationStreak = newUnderutilizationStreak,
- messageCount = messageCounter,
- totalQueueLength = totalQueueLength,
- checkTime = System.nanoTime())
+ val newRecord = record.copy(
+ underutilizationStreak = newUnderutilizationStreak,
+ messageCount = messageCounter,
+ totalQueueLength = totalQueueLength,
+ checkTime = System.nanoTime())
(newPerformanceLog, newRecord)
diff --git a/akka-actor/src/main/scala/akka/routing/Random.scala b/akka-actor/src/main/scala/akka/routing/Random.scala
index 7f905acf2e..02d2109620 100644
--- a/akka-actor/src/main/scala/akka/routing/Random.scala
+++ b/akka-actor/src/main/scala/akka/routing/Random.scala
@@ -57,18 +57,20 @@ final class RandomRoutingLogic extends RoutingLogic {
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
-final case class RandomPool(val nrOfInstances: Int,
- override val resizer: Option[Resizer] = None,
- override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+final case class RandomPool(
+ val nrOfInstances: Int,
+ override val resizer: Option[Resizer] = None,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool
with PoolOverrideUnsetConfig[RandomPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
- resizer = Resizer.fromConfig(config),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
+ resizer = Resizer.fromConfig(config),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
@@ -119,8 +121,9 @@ final case class RandomPool(val nrOfInstances: Int,
* router management messages
*/
@SerialVersionUID(1L)
-final case class RandomGroup(val paths: immutable.Iterable[String],
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+final case class RandomGroup(
+ val paths: immutable.Iterable[String],
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/Resizer.scala b/akka-actor/src/main/scala/akka/routing/Resizer.scala
index a695721070..83c273f8ee 100644
--- a/akka-actor/src/main/scala/akka/routing/Resizer.scala
+++ b/akka-actor/src/main/scala/akka/routing/Resizer.scala
@@ -79,13 +79,14 @@ case object DefaultResizer {
* Creates a new DefaultResizer from the given configuration
*/
def apply(resizerConfig: Config): DefaultResizer =
- DefaultResizer(lowerBound = resizerConfig.getInt("lower-bound"),
- upperBound = resizerConfig.getInt("upper-bound"),
- pressureThreshold = resizerConfig.getInt("pressure-threshold"),
- rampupRate = resizerConfig.getDouble("rampup-rate"),
- backoffThreshold = resizerConfig.getDouble("backoff-threshold"),
- backoffRate = resizerConfig.getDouble("backoff-rate"),
- messagesPerResize = resizerConfig.getInt("messages-per-resize"))
+ DefaultResizer(
+ lowerBound = resizerConfig.getInt("lower-bound"),
+ upperBound = resizerConfig.getInt("upper-bound"),
+ pressureThreshold = resizerConfig.getInt("pressure-threshold"),
+ rampupRate = resizerConfig.getDouble("rampup-rate"),
+ backoffThreshold = resizerConfig.getDouble("backoff-threshold"),
+ backoffRate = resizerConfig.getDouble("backoff-rate"),
+ messagesPerResize = resizerConfig.getInt("messages-per-resize"))
def fromConfig(resizerConfig: Config): Option[DefaultResizer] =
if (resizerConfig.getBoolean("resizer.enabled"))
@@ -126,13 +127,14 @@ case object DefaultResizer {
* Use 1 to resize before each message.
*/
@SerialVersionUID(1L)
-case class DefaultResizer(val lowerBound: Int = 1,
- val upperBound: Int = 10,
- val pressureThreshold: Int = 1,
- val rampupRate: Double = 0.2,
- val backoffThreshold: Double = 0.3,
- val backoffRate: Double = 0.1,
- val messagesPerResize: Int = 10)
+case class DefaultResizer(
+ val lowerBound: Int = 1,
+ val upperBound: Int = 10,
+ val pressureThreshold: Int = 1,
+ val rampupRate: Double = 0.2,
+ val backoffThreshold: Double = 0.3,
+ val backoffRate: Double = 0.1,
+ val messagesPerResize: Int = 10)
extends Resizer {
/**
@@ -250,13 +252,14 @@ case class DefaultResizer(val lowerBound: Int = 1,
/**
* INTERNAL API
*/
-private[akka] final class ResizablePoolCell(_system: ActorSystemImpl,
- _ref: InternalActorRef,
- _routerProps: Props,
- _routerDispatcher: MessageDispatcher,
- _routeeProps: Props,
- _supervisor: InternalActorRef,
- val pool: Pool)
+private[akka] final class ResizablePoolCell(
+ _system: ActorSystemImpl,
+ _ref: InternalActorRef,
+ _routerProps: Props,
+ _routerDispatcher: MessageDispatcher,
+ _routeeProps: Props,
+ _supervisor: InternalActorRef,
+ val pool: Pool)
extends RoutedActorCell(_system, _ref, _routerProps, _routerDispatcher, _routeeProps, _supervisor) {
require(pool.resizer.isDefined, "RouterConfig must be a Pool with defined resizer")
diff --git a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
index ae0a0b2e38..e688d6712f 100644
--- a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
@@ -65,18 +65,20 @@ final class RoundRobinRoutingLogic extends RoutingLogic {
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
-final case class RoundRobinPool(val nrOfInstances: Int,
- override val resizer: Option[Resizer] = None,
- override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+final case class RoundRobinPool(
+ val nrOfInstances: Int,
+ override val resizer: Option[Resizer] = None,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool
with PoolOverrideUnsetConfig[RoundRobinPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
- resizer = Resizer.fromConfig(config),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
+ resizer = Resizer.fromConfig(config),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
@@ -128,8 +130,9 @@ final case class RoundRobinPool(val nrOfInstances: Int,
* router management messages
*/
@SerialVersionUID(1L)
-final case class RoundRobinGroup(val paths: immutable.Iterable[String],
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+final case class RoundRobinGroup(
+ val paths: immutable.Iterable[String],
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
index 90bb69b39f..6748858670 100644
--- a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
@@ -36,12 +36,13 @@ private[akka] object RoutedActorCell {
/**
* INTERNAL API
*/
-private[akka] class RoutedActorCell(_system: ActorSystemImpl,
- _ref: InternalActorRef,
- _routerProps: Props,
- _routerDispatcher: MessageDispatcher,
- val routeeProps: Props,
- _supervisor: InternalActorRef)
+private[akka] class RoutedActorCell(
+ _system: ActorSystemImpl,
+ _ref: InternalActorRef,
+ _routerProps: Props,
+ _routerDispatcher: MessageDispatcher,
+ val routeeProps: Props,
+ _supervisor: InternalActorRef)
extends ActorCell(_system, _ref, _routerProps, _routerDispatcher, _supervisor) {
private[akka] val routerConfig = _routerProps.routerConfig
diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
index 477bcf41a0..bdef4e03e5 100644
--- a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
@@ -22,13 +22,14 @@ import akka.dispatch.MessageDispatcher
* A RoutedActorRef is an ActorRef that has a set of connected ActorRef and it uses a Router to
* send a message to one (or more) of these actors.
*/
-private[akka] class RoutedActorRef(_system: ActorSystemImpl,
- _routerProps: Props,
- _routerDispatcher: MessageDispatcher,
- _routerMailbox: MailboxType,
- _routeeProps: Props,
- _supervisor: InternalActorRef,
- _path: ActorPath)
+private[akka] class RoutedActorRef(
+ _system: ActorSystemImpl,
+ _routerProps: Props,
+ _routerDispatcher: MessageDispatcher,
+ _routerMailbox: MailboxType,
+ _routeeProps: Props,
+ _supervisor: InternalActorRef,
+ _path: ActorPath)
extends RepointableActorRef(_system, _routerProps, _routerDispatcher, _routerMailbox, _supervisor, _path) {
// verify that a BalancingDispatcher is not used with a Router
diff --git a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
index e8bff85efb..dd1bda8877 100644
--- a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
+++ b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
@@ -272,9 +272,10 @@ case object FromConfig extends FromConfig {
* Java API: get the singleton instance
*/
def getInstance = this
- @inline final def apply(resizer: Option[Resizer] = None,
- supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
+ @inline final def apply(
+ resizer: Option[Resizer] = None,
+ supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
new FromConfig(resizer, supervisorStrategy, routerDispatcher)
@inline final def unapply(fc: FromConfig): Option[String] = Some(fc.routerDispatcher)
@@ -289,9 +290,10 @@ case object FromConfig extends FromConfig {
* (defaults to default-dispatcher).
*/
@SerialVersionUID(1L)
-class FromConfig(override val resizer: Option[Resizer],
- override val supervisorStrategy: SupervisorStrategy,
- override val routerDispatcher: String)
+class FromConfig(
+ override val resizer: Option[Resizer],
+ override val supervisorStrategy: SupervisorStrategy,
+ override val routerDispatcher: String)
extends Pool {
def this() = this(None, Pool.defaultSupervisorStrategy, Dispatchers.DefaultDispatcherId)
diff --git a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
index a5fb5b9970..a2b8aef36e 100644
--- a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
+++ b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
@@ -38,8 +38,9 @@ final case class ScatterGatherFirstCompletedRoutingLogic(within: FiniteDuration)
* INTERNAL API
*/
@SerialVersionUID(1L)
-private[akka] final case class ScatterGatherFirstCompletedRoutees(routees: immutable.IndexedSeq[Routee],
- within: FiniteDuration)
+private[akka] final case class ScatterGatherFirstCompletedRoutees(
+ routees: immutable.IndexedSeq[Routee],
+ within: FiniteDuration)
extends Routee {
override def send(message: Any, sender: ActorRef): Unit =
@@ -108,10 +109,11 @@ final case class ScatterGatherFirstCompletedPool(
with PoolOverrideUnsetConfig[ScatterGatherFirstCompletedPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
- within = config.getMillisDuration("within"),
- resizer = Resizer.fromConfig(config),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
+ within = config.getMillisDuration("within"),
+ resizer = Resizer.fromConfig(config),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
@@ -176,10 +178,10 @@ final case class ScatterGatherFirstCompletedPool(
* router management messages
*/
@SerialVersionUID(1L)
-final case class ScatterGatherFirstCompletedGroup(val paths: immutable.Iterable[String],
- within: FiniteDuration,
- override val routerDispatcher: String =
- Dispatchers.DefaultDispatcherId)
+final case class ScatterGatherFirstCompletedGroup(
+ val paths: immutable.Iterable[String],
+ within: FiniteDuration,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
index dafc92602d..b87f570716 100644
--- a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
+++ b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
@@ -46,11 +46,12 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
// 4. An ActorRef with unknown mailbox size that isn't processing anything
// 5. An ActorRef with a known mailbox size
// 6. An ActorRef without any messages
- @tailrec private def selectNext(targets: immutable.IndexedSeq[Routee],
- proposedTarget: Routee = NoRoutee,
- currentScore: Long = Long.MaxValue,
- at: Int = 0,
- deep: Boolean = false): Routee = {
+ @tailrec private def selectNext(
+ targets: immutable.IndexedSeq[Routee],
+ proposedTarget: Routee = NoRoutee,
+ currentScore: Long = Long.MaxValue,
+ at: Int = 0,
+ deep: Boolean = false): Routee = {
if (targets.isEmpty)
NoRoutee
else if (at >= targets.size) {
@@ -174,19 +175,20 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
-final case class SmallestMailboxPool(val nrOfInstances: Int,
- override val resizer: Option[Resizer] = None,
- override val supervisorStrategy: SupervisorStrategy =
- Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+final case class SmallestMailboxPool(
+ val nrOfInstances: Int,
+ override val resizer: Option[Resizer] = None,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool
with PoolOverrideUnsetConfig[SmallestMailboxPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
- resizer = Resizer.fromConfig(config),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
+ resizer = Resizer.fromConfig(config),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
diff --git a/akka-actor/src/main/scala/akka/routing/TailChopping.scala b/akka-actor/src/main/scala/akka/routing/TailChopping.scala
index a0a4654174..c0327e2a86 100644
--- a/akka-actor/src/main/scala/akka/routing/TailChopping.scala
+++ b/akka-actor/src/main/scala/akka/routing/TailChopping.scala
@@ -46,10 +46,11 @@ import scala.util.Random
* @param context execution context used by scheduler
*/
@SerialVersionUID(1L)
-final case class TailChoppingRoutingLogic(scheduler: Scheduler,
- within: FiniteDuration,
- interval: FiniteDuration,
- context: ExecutionContext)
+final case class TailChoppingRoutingLogic(
+ scheduler: Scheduler,
+ within: FiniteDuration,
+ interval: FiniteDuration,
+ context: ExecutionContext)
extends RoutingLogic {
override def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee = {
if (routees.isEmpty) NoRoutee
@@ -61,10 +62,11 @@ final case class TailChoppingRoutingLogic(scheduler: Scheduler,
* INTERNAL API
*/
@SerialVersionUID(1L)
-private[akka] final case class TailChoppingRoutees(scheduler: Scheduler,
- routees: immutable.IndexedSeq[Routee],
- within: FiniteDuration,
- interval: FiniteDuration)(implicit ec: ExecutionContext)
+private[akka] final case class TailChoppingRoutees(
+ scheduler: Scheduler,
+ routees: immutable.IndexedSeq[Routee],
+ within: FiniteDuration,
+ interval: FiniteDuration)(implicit ec: ExecutionContext)
extends Routee {
override def send(message: Any, sender: ActorRef): Unit = {
@@ -147,22 +149,24 @@ private[akka] final case class TailChoppingRoutees(scheduler: Scheduler,
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
-final case class TailChoppingPool(val nrOfInstances: Int,
- override val resizer: Option[Resizer] = None,
- within: FiniteDuration,
- interval: FiniteDuration,
- override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+final case class TailChoppingPool(
+ val nrOfInstances: Int,
+ override val resizer: Option[Resizer] = None,
+ within: FiniteDuration,
+ interval: FiniteDuration,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool
with PoolOverrideUnsetConfig[TailChoppingPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
- within = config.getMillisDuration("within"),
- interval = config.getMillisDuration("tail-chopping-router.interval"),
- resizer = Resizer.fromConfig(config),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
+ within = config.getMillisDuration("within"),
+ interval = config.getMillisDuration("tail-chopping-router.interval"),
+ resizer = Resizer.fromConfig(config),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
@@ -243,16 +247,18 @@ final case class TailChoppingPool(val nrOfInstances: Int,
* @param routerDispatcher dispatcher to use for the router head actor, which handles
* router management messages
*/
-final case class TailChoppingGroup(val paths: immutable.Iterable[String],
- within: FiniteDuration,
- interval: FiniteDuration,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+final case class TailChoppingGroup(
+ val paths: immutable.Iterable[String],
+ within: FiniteDuration,
+ interval: FiniteDuration,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
- this(paths = immutableSeq(config.getStringList("routees.paths")),
- within = config.getMillisDuration("within"),
- interval = config.getMillisDuration("tail-chopping-router.interval"))
+ this(
+ paths = immutableSeq(config.getStringList("routees.paths")),
+ within = config.getMillisDuration("within"),
+ interval = config.getMillisDuration("tail-chopping-router.interval"))
/**
* Java API
diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala
index 81d33a83f7..06ee8f2c0f 100644
--- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala
+++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala
@@ -346,12 +346,11 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
}
if (!unique(possibilitiesWithoutJavaSerializer)) {
- _log.warning(LogMarker.Security,
- "Multiple serializers found for [{}], choosing first of: [{}]",
- clazz.getName,
- possibilitiesWithoutJavaSerializer
- .map { case (_, s) => s.getClass.getName }
- .mkString(", "))
+ _log.warning(
+ LogMarker.Security,
+ "Multiple serializers found for [{}], choosing first of: [{}]",
+ clazz.getName,
+ possibilitiesWithoutJavaSerializer.map { case (_, s) => s.getClass.getName }.mkString(", "))
}
possibilitiesWithoutJavaSerializer.head._2
@@ -363,11 +362,12 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
serializerMap.putIfAbsent(clazz, ser) match {
case null =>
if (shouldWarnAboutJavaSerializer(clazz, ser)) {
- _log.warning(LogMarker.Security,
- "Using the default Java serializer for class [{}] which is not recommended because of " +
- "performance implications. Use another serializer or disable this warning using the setting " +
- "'akka.actor.warn-about-java-serializer-usage'",
- clazz.getName)
+ _log.warning(
+ LogMarker.Security,
+ "Using the default Java serializer for class [{}] which is not recommended because of " +
+ "performance implications. Use another serializer or disable this warning using the setting " +
+ "'akka.actor.warn-about-java-serializer-usage'",
+ clazz.getName)
}
if (!warnUnexpectedNonAkkaSerializer(clazz, ser))
@@ -456,11 +456,12 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
private def warnUnexpectedNonAkkaSerializer(clazz: Class[_], ser: Serializer): Boolean = {
if (clazz.getName.startsWith("akka.") && !ser.getClass.getName.startsWith("akka.")) {
- log.warning("Using serializer [{}] for message [{}]. Note that this serializer " +
- "is not implemented by Akka. It's not recommended to replace serializers for messages " +
- "provided by Akka.",
- ser.getClass.getName,
- clazz.getName)
+ log.warning(
+ "Using serializer [{}] for message [{}]. Note that this serializer " +
+ "is not implemented by Akka. It's not recommended to replace serializers for messages " +
+ "provided by Akka.",
+ ser.getClass.getName,
+ clazz.getName)
true
} else false
}
diff --git a/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala b/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala
index 382d66e3e9..3e8eec211e 100644
--- a/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala
+++ b/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala
@@ -64,6 +64,7 @@ object SerializerDetails {
* Constructor is internal API: Use the factories [[SerializerDetails#create]] or [[SerializerDetails#apply]]
* to construct
*/
-final class SerializerDetails private (val alias: String,
- val serializer: Serializer,
- val useFor: immutable.Seq[Class[_]])
+final class SerializerDetails private (
+ val alias: String,
+ val serializer: Serializer,
+ val useFor: immutable.Seq[Class[_]])
diff --git a/akka-actor/src/main/scala/akka/util/BoxedType.scala b/akka-actor/src/main/scala/akka/util/BoxedType.scala
index 4472a4979f..769ae04ed9 100644
--- a/akka-actor/src/main/scala/akka/util/BoxedType.scala
+++ b/akka-actor/src/main/scala/akka/util/BoxedType.scala
@@ -7,15 +7,16 @@ package akka.util
object BoxedType {
import java.{ lang => jl }
- private val toBoxed = Map[Class[_], Class[_]](classOf[Boolean] -> classOf[jl.Boolean],
- classOf[Byte] -> classOf[jl.Byte],
- classOf[Char] -> classOf[jl.Character],
- classOf[Short] -> classOf[jl.Short],
- classOf[Int] -> classOf[jl.Integer],
- classOf[Long] -> classOf[jl.Long],
- classOf[Float] -> classOf[jl.Float],
- classOf[Double] -> classOf[jl.Double],
- classOf[Unit] -> classOf[scala.runtime.BoxedUnit])
+ private val toBoxed = Map[Class[_], Class[_]](
+ classOf[Boolean] -> classOf[jl.Boolean],
+ classOf[Byte] -> classOf[jl.Byte],
+ classOf[Char] -> classOf[jl.Character],
+ classOf[Short] -> classOf[jl.Short],
+ classOf[Int] -> classOf[jl.Integer],
+ classOf[Long] -> classOf[jl.Long],
+ classOf[Float] -> classOf[jl.Float],
+ classOf[Double] -> classOf[jl.Double],
+ classOf[Unit] -> classOf[scala.runtime.BoxedUnit])
final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c
}
diff --git a/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala
index dbf3238aa6..625064ad25 100644
--- a/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala
+++ b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala
@@ -20,8 +20,9 @@ import scala.annotation.tailrec
* Keys and values are encoded consecutively in a single Int array and does copy-on-write with no
* structural sharing, it's intended for rather small maps (<1000 elements).
*/
-@InternalApi private[akka] final class ImmutableIntMap private (private final val kvs: Array[Int],
- final val size: Int) {
+@InternalApi private[akka] final class ImmutableIntMap private (
+ private final val kvs: Array[Int],
+ final val size: Int) {
private final def this(key: Int, value: Int) = {
this(new Array[Int](2), 1)
diff --git a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala
index 7038477fca..86abb82cf0 100644
--- a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala
+++ b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala
@@ -30,12 +30,13 @@ object ManifestInfo extends ExtensionId[ManifestInfo] with ExtensionIdProvider {
private val BundleVersion = "Bundle-Version"
private val BundleVendor = "Bundle-Vendor"
- private val knownVendors = Set("com.typesafe.akka",
- "com.lightbend.akka",
- "Lightbend Inc.",
- "Lightbend",
- "com.lightbend.lagom",
- "com.typesafe.play")
+ private val knownVendors = Set(
+ "com.typesafe.akka",
+ "com.lightbend.akka",
+ "Lightbend Inc.",
+ "Lightbend",
+ "com.lightbend.lagom",
+ "com.typesafe.play")
override def get(system: ActorSystem): ManifestInfo = super.get(system)
diff --git a/akka-actor/src/main/scala/akka/util/PrettyByteString.scala b/akka-actor/src/main/scala/akka/util/PrettyByteString.scala
index 5b2989cbfe..23d9ba5611 100644
--- a/akka-actor/src/main/scala/akka/util/PrettyByteString.scala
+++ b/akka-actor/src/main/scala/akka/util/PrettyByteString.scala
@@ -35,10 +35,11 @@ private[akka] object PrettyByteString {
if (bs.size <= maxBytes) Iterator(prefix + "\n", formatBytes(bs))
else
- Iterator(s"$prefix first + last $maxBytes:\n",
- formatBytes(bs.take(maxBytes)),
- s"\n$indent ... [${bs.size - maxBytes} bytes omitted] ...\n",
- formatBytes(bs.takeRight(maxBytes)))
+ Iterator(
+ s"$prefix first + last $maxBytes:\n",
+ formatBytes(bs.take(maxBytes)),
+ s"\n$indent ... [${bs.size - maxBytes} bytes omitted] ...\n",
+ formatBytes(bs.takeRight(maxBytes)))
}
}
diff --git a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala
index cc6bb50e62..9b4ab21470 100644
--- a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala
+++ b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala
@@ -39,10 +39,11 @@ private[akka] object PrettyDuration {
val unit = chooseUnit(nanos)
val value = nanos.toDouble / NANOSECONDS.convert(1, unit)
- s"%.${precision}g %s%s".formatLocal(Locale.ROOT,
- value,
- abbreviate(unit),
- if (includeNanos) s" ($nanos ns)" else "")
+ s"%.${precision}g %s%s".formatLocal(
+ Locale.ROOT,
+ value,
+ abbreviate(unit),
+ if (includeNanos) s" ($nanos ns)" else "")
case Duration.MinusInf => s"-∞ (minus infinity)"
case Duration.Inf => s"∞ (infinity)"
diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala b/akka-actor/src/main/scala/akka/util/Reflect.scala
index dc7dc5e3ef..e13ee687a3 100644
--- a/akka-actor/src/main/scala/akka/util/Reflect.scala
+++ b/akka-actor/src/main/scala/akka/util/Reflect.scala
@@ -140,10 +140,11 @@ private[akka] object Reflect {
* INTERNAL API
* Set a val inside a class.
*/
- @tailrec protected[akka] final def lookupAndSetField(clazz: Class[_],
- instance: AnyRef,
- name: String,
- value: Any): Boolean = {
+ @tailrec protected[akka] final def lookupAndSetField(
+ clazz: Class[_],
+ instance: AnyRef,
+ name: String,
+ value: Any): Boolean = {
@tailrec def clearFirst(fields: Array[java.lang.reflect.Field], idx: Int): Boolean =
if (idx < fields.length) {
val field = fields(idx)
diff --git a/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala b/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala
index 57b8fb085c..4d0f3fb502 100644
--- a/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala
+++ b/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala
@@ -36,8 +36,9 @@ private[akka] final class SerializedSuspendableExecutionContext(throughput: Int)
with Runnable
with ExecutionContext {
import SerializedSuspendableExecutionContext._
- require(throughput > 0,
- s"SerializedSuspendableExecutionContext.throughput must be greater than 0 but was $throughput")
+ require(
+ throughput > 0,
+ s"SerializedSuspendableExecutionContext.throughput must be greater than 0 but was $throughput")
private final val state = new AtomicInteger(Off)
@tailrec private final def addState(newState: Int): Boolean = {
diff --git a/akka-actor/src/main/scala/akka/util/WildcardIndex.scala b/akka-actor/src/main/scala/akka/util/WildcardIndex.scala
index b7ba93da85..bf55bd4fe9 100644
--- a/akka-actor/src/main/scala/akka/util/WildcardIndex.scala
+++ b/akka-actor/src/main/scala/akka/util/WildcardIndex.scala
@@ -7,8 +7,9 @@ package akka.util
import scala.annotation.tailrec
import scala.collection.immutable.HashMap
-private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] = WildcardTree[T](),
- doubleWildcardTree: WildcardTree[T] = WildcardTree[T]()) {
+private[akka] final case class WildcardIndex[T](
+ wildcardTree: WildcardTree[T] = WildcardTree[T](),
+ doubleWildcardTree: WildcardTree[T] = WildcardTree[T]()) {
def insert(elems: Array[String], d: T): WildcardIndex[T] = elems.lastOption match {
case Some("**") => copy(doubleWildcardTree = doubleWildcardTree.insert(elems.iterator, d))
@@ -41,9 +42,9 @@ private[akka] object WildcardTree {
def apply[T](): WildcardTree[T] = empty.asInstanceOf[WildcardTree[T]]
}
-private[akka] final case class WildcardTree[T](data: Option[T] = None,
- children: Map[String, WildcardTree[T]] =
- HashMap[String, WildcardTree[T]]()) {
+private[akka] final case class WildcardTree[T](
+ data: Option[T] = None,
+ children: Map[String, WildcardTree[T]] = HashMap[String, WildcardTree[T]]()) {
def isEmpty: Boolean = data.isEmpty && children.isEmpty
@@ -68,8 +69,9 @@ private[akka] final case class WildcardTree[T](data: Option[T] = None,
}
}
- @tailrec def findWithTerminalDoubleWildcard(elems: Iterator[String],
- alt: WildcardTree[T] = WildcardTree[T]()): WildcardTree[T] = {
+ @tailrec def findWithTerminalDoubleWildcard(
+ elems: Iterator[String],
+ alt: WildcardTree[T] = WildcardTree[T]()): WildcardTree[T] = {
if (!elems.hasNext) this
else {
val newAlt = children.getOrElse("**", alt)
diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala
index 6cd55cf7e9..db2c2eb429 100644
--- a/akka-agent/src/main/scala/akka/agent/Agent.scala
+++ b/akka-agent/src/main/scala/akka/agent/Agent.scala
@@ -8,15 +8,17 @@ import scala.concurrent.stm._
import scala.concurrent.{ ExecutionContext, Future, Promise }
import akka.util.SerializedSuspendableExecutionContext
-@deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
- since = "2.5.0")
+@deprecated(
+ "Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
+ since = "2.5.0")
object Agent {
/**
* Factory method for creating an Agent.
*/
- @deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
- since = "2.5.0")
+ @deprecated(
+ "Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
+ since = "2.5.0")
def apply[T](initialValue: T)(implicit context: ExecutionContext): Agent[T] = new SecretAgent(initialValue, context)
/**
@@ -24,8 +26,9 @@ object Agent {
* @deprecated Agents are deprecated and scheduled for removal in the next major version, use Actors instead.i
*/
@Deprecated
- @deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
- since = "2.5.0")
+ @deprecated(
+ "Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
+ since = "2.5.0")
def create[T](initialValue: T, context: ExecutionContext): Agent[T] = Agent(initialValue)(context)
/**
@@ -170,8 +173,9 @@ object Agent {
*
* @deprecated Agents are deprecated and scheduled for removal in the next major version, use Actors instead.
*/
-@deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
- since = "2.5.0")
+@deprecated(
+ "Agents are deprecated and scheduled for removal in the next major version, use Actors instead.",
+ since = "2.5.0")
abstract class Agent[T] {
/**
diff --git a/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala b/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala
index ded76e8684..6981b4948b 100644
--- a/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala
+++ b/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala
@@ -20,10 +20,11 @@ object TypedBenchmarkActors {
Behaviors.same
}
- private def echoSender(messagesPerPair: Int,
- onDone: ActorRef[Done],
- batchSize: Int,
- childProps: Props): Behavior[Message.type] =
+ private def echoSender(
+ messagesPerPair: Int,
+ onDone: ActorRef[Done],
+ batchSize: Int,
+ childProps: Props): Behavior[Message.type] =
Behaviors.setup { ctx =>
val echo = ctx.spawn(echoBehavior(ctx.self), "echo", childProps)
var left = messagesPerPair / 2
@@ -57,11 +58,12 @@ object TypedBenchmarkActors {
case class Start(respondTo: ActorRef[Completed])
case class Completed(startNanoTime: Long)
- def echoActorsSupervisor(numMessagesPerActorPair: Int,
- numActors: Int,
- dispatcher: String,
- batchSize: Int,
- shutdownTimeout: FiniteDuration): Behavior[Start] =
+ def echoActorsSupervisor(
+ numMessagesPerActorPair: Int,
+ numActors: Int,
+ dispatcher: String,
+ batchSize: Int,
+ shutdownTimeout: FiniteDuration): Behavior[Start] =
Behaviors.receive { (ctx, msg) =>
msg match {
case Start(respondTo) =>
@@ -73,11 +75,12 @@ object TypedBenchmarkActors {
}
}
- private def startEchoBenchSession(messagesPerPair: Int,
- numActors: Int,
- dispatcher: String,
- batchSize: Int,
- respondTo: ActorRef[Completed]): Behavior[Unit] = {
+ private def startEchoBenchSession(
+ messagesPerPair: Int,
+ numActors: Int,
+ dispatcher: String,
+ batchSize: Int,
+ respondTo: ActorRef[Completed]): Behavior[Unit] = {
val numPairs = numActors / 2
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala
index 05b7720265..84b205dfc9 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala
@@ -37,9 +37,10 @@ class ActorBenchmark {
//@Param(Array("akka.actor.ManyToOneArrayMailbox"))
@Param(
- Array("akka.dispatch.SingleConsumerOnlyUnboundedMailbox",
- "akka.actor.ManyToOneArrayMailbox",
- "akka.actor.JCToolsMailbox"))
+ Array(
+ "akka.dispatch.SingleConsumerOnlyUnboundedMailbox",
+ "akka.actor.ManyToOneArrayMailbox",
+ "akka.actor.JCToolsMailbox"))
var mailbox = ""
@Param(Array("fjp-dispatcher")) // @Param(Array("fjp-dispatcher", "affinity-dispatcher"))
@@ -52,8 +53,9 @@ class ActorBenchmark {
requireRightNumberOfCores(threads)
- system = ActorSystem("ActorBenchmark",
- ConfigFactory.parseString(s"""
+ system = ActorSystem(
+ "ActorBenchmark",
+ ConfigFactory.parseString(s"""
akka.actor {
default-mailbox.mailbox-capacity = 512
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala
index 5647cf87c2..df0a350db9 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala
@@ -45,8 +45,9 @@ class AffinityPoolComparativeBenchmark {
s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}""""
}
- system = ActorSystem("AffinityPoolComparativeBenchmark",
- ConfigFactory.parseString(s"""| akka {
+ system = ActorSystem(
+ "AffinityPoolComparativeBenchmark",
+ ConfigFactory.parseString(s"""| akka {
| log-dead-letters = off
| actor {
| default-fj-dispatcher {
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala
index 714775f41a..c1a043d728 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala
@@ -35,8 +35,9 @@ class AffinityPoolIdleCPULevelBenchmark {
requireRightNumberOfCores(numThreads)
- system = ActorSystem("AffinityPoolWaitingStrategyBenchmark",
- ConfigFactory.parseString(s""" | akka {
+ system = ActorSystem(
+ "AffinityPoolWaitingStrategyBenchmark",
+ ConfigFactory.parseString(s""" | akka {
| log-dead-letters = off
| actor {
| affinity-dispatcher {
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala
index 652d4e5f51..ce140b121d 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala
@@ -49,8 +49,9 @@ class AffinityPoolRequestResponseBenchmark {
s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}""""
}
- system = ActorSystem("AffinityPoolComparativeBenchmark",
- ConfigFactory.parseString(s"""| akka {
+ system = ActorSystem(
+ "AffinityPoolComparativeBenchmark",
+ ConfigFactory.parseString(s"""| akka {
| log-dead-letters = off
| actor {
| default-fj-dispatcher {
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala
index 28bf2ad2cf..10b5f404ed 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala
@@ -139,14 +139,16 @@ object BenchmarkActors {
}
def requireRightNumberOfCores(numCores: Int) =
- require(Runtime.getRuntime.availableProcessors == numCores,
- s"Update the cores constant to ${Runtime.getRuntime.availableProcessors}")
+ require(
+ Runtime.getRuntime.availableProcessors == numCores,
+ s"Update the cores constant to ${Runtime.getRuntime.availableProcessors}")
- def benchmarkPingPongActors(numMessagesPerActorPair: Int,
- numActors: Int,
- dispatcher: String,
- throughPut: Int,
- shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = {
+ def benchmarkPingPongActors(
+ numMessagesPerActorPair: Int,
+ numActors: Int,
+ dispatcher: String,
+ throughPut: Int,
+ shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = {
val numPairs = numActors / 2
val totalNumMessages = numPairs * numMessagesPerActorPair
val (actors, latch) = startPingPongActorPairs(numMessagesPerActorPair, numPairs, dispatcher)
@@ -156,11 +158,12 @@ object BenchmarkActors {
printProgress(totalNumMessages, numActors, startNanoTime)
}
- def benchmarkEchoActors(numMessagesPerActorPair: Int,
- numActors: Int,
- dispatcher: String,
- batchSize: Int,
- shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = {
+ def benchmarkEchoActors(
+ numMessagesPerActorPair: Int,
+ numActors: Int,
+ dispatcher: String,
+ batchSize: Int,
+ shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = {
val numPairs = numActors / 2
val totalNumMessages = numPairs * numMessagesPerActorPair
val (actors, latch) = startEchoActorPairs(numMessagesPerActorPair, numPairs, dispatcher, batchSize)
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
index 8a17b62ffd..e1a1692400 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
@@ -29,9 +29,10 @@ class ForkJoinActorBenchmark {
var threads = ""
@Param(
- Array("akka.dispatch.SingleConsumerOnlyUnboundedMailbox",
- "akka.actor.ManyToOneArrayMailbox",
- "akka.actor.JCToolsMailbox"))
+ Array(
+ "akka.dispatch.SingleConsumerOnlyUnboundedMailbox",
+ "akka.actor.ManyToOneArrayMailbox",
+ "akka.actor.JCToolsMailbox"))
var mailbox = ""
implicit var system: ActorSystem = _
@@ -41,8 +42,9 @@ class ForkJoinActorBenchmark {
requireRightNumberOfCores(cores)
- system = ActorSystem("ForkJoinActorBenchmark",
- ConfigFactory.parseString(s"""
+ system = ActorSystem(
+ "ForkJoinActorBenchmark",
+ ConfigFactory.parseString(s"""
akka {
log-dead-letters = off
default-mailbox.mailbox-capacity = 512
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
index c6689f9574..e62341cf09 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
@@ -26,8 +26,9 @@ class TellOnlyBenchmark {
@Setup(Level.Trial)
def setup(): Unit = {
- system = ActorSystem("TellOnlyBenchmark",
- ConfigFactory.parseString(s"""| akka {
+ system = ActorSystem(
+ "TellOnlyBenchmark",
+ ConfigFactory.parseString(s"""| akka {
| log-dead-letters = off
| actor {
| default-dispatcher {
@@ -119,18 +120,20 @@ object TellOnlyBenchmark {
new DroppingMessageQueue
}
- class DroppingDispatcher(_configurator: MessageDispatcherConfigurator,
- _id: String,
- _throughput: Int,
- _throughputDeadlineTime: Duration,
- _executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
- _shutdownTimeout: FiniteDuration)
- extends Dispatcher(_configurator,
- _id,
- _throughput,
- _throughputDeadlineTime,
- _executorServiceFactoryProvider,
- _shutdownTimeout) {
+ class DroppingDispatcher(
+ _configurator: MessageDispatcherConfigurator,
+ _id: String,
+ _throughput: Int,
+ _throughputDeadlineTime: Duration,
+ _executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
+ _shutdownTimeout: FiniteDuration)
+ extends Dispatcher(
+ _configurator,
+ _id,
+ _throughput,
+ _throughputDeadlineTime,
+ _executorServiceFactoryProvider,
+ _shutdownTimeout) {
override protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = {
val mbox = receiver.mailbox
@@ -146,11 +149,12 @@ object TellOnlyBenchmark {
extends MessageDispatcherConfigurator(config, prerequisites) {
override def dispatcher(): MessageDispatcher =
- new DroppingDispatcher(this,
- config.getString("id"),
- config.getInt("throughput"),
- config.getNanosDuration("throughput-deadline-time"),
- configureExecutor(),
- config.getMillisDuration("shutdown-timeout"))
+ new DroppingDispatcher(
+ this,
+ config.getString("id"),
+ config.getInt("throughput"),
+ config.getNanosDuration("throughput-deadline-time"),
+ configureExecutor(),
+ config.getMillisDuration("shutdown-timeout"))
}
}
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
index 946d425bf1..8aefb31ebe 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
@@ -100,9 +100,10 @@ class LevelDbBatchingBenchmark {
private def deleteStorage(sys: ActorSystem): Unit = {
val storageLocations =
- List("akka.persistence.journal.leveldb.dir",
- "akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s => new File(sys.settings.config.getString(s)))
+ List(
+ "akka.persistence.journal.leveldb.dir",
+ "akka.persistence.journal.leveldb-shared.store.dir",
+ "akka.persistence.snapshot-store.local.dir").map(s => new File(sys.settings.config.getString(s)))
storageLocations.foreach(FileUtils.deleteDirectory)
}
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
index 43082f3ddc..896cc40191 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
@@ -31,9 +31,10 @@ class PersistentActorDeferBenchmark {
val config = PersistenceSpec.config("leveldb", "benchmark")
lazy val storageLocations =
- List("akka.persistence.journal.leveldb.dir",
- "akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
+ List(
+ "akka.persistence.journal.leveldb.dir",
+ "akka.persistence.journal.leveldb-shared.store.dir",
+ "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
var system: ActorSystem = _
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
index 872959f9f4..73abc534e7 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
@@ -20,9 +20,10 @@ class PersistentActorThroughputBenchmark {
val config = PersistenceSpec.config("leveldb", "benchmark")
lazy val storageLocations =
- List("akka.persistence.journal.leveldb.dir",
- "akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
+ List(
+ "akka.persistence.journal.leveldb.dir",
+ "akka.persistence.journal.leveldb-shared.store.dir",
+ "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
var system: ActorSystem = _
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
index 0ca931c90d..8547136d29 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
@@ -21,9 +21,10 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
val config = PersistenceSpec.config("leveldb", "benchmark")
lazy val storageLocations =
- List("akka.persistence.journal.leveldb.dir",
- "akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
+ List(
+ "akka.persistence.journal.leveldb.dir",
+ "akka.persistence.journal.leveldb-shared.store.dir",
+ "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
var system: ActorSystem = _
@@ -90,9 +91,10 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
}
}
-class NoPersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int,
- val upStream: ActorRef,
- val downStream: ActorPath)
+class NoPersistPersistentActorWithAtLeastOnceDelivery(
+ respondAfter: Int,
+ val upStream: ActorRef,
+ val downStream: ActorPath)
extends PersistentActor
with AtLeastOnceDelivery {
@@ -126,9 +128,10 @@ class NoPersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int,
}
}
-class PersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int,
- val upStream: ActorRef,
- val downStream: ActorPath)
+class PersistPersistentActorWithAtLeastOnceDelivery(
+ respondAfter: Int,
+ val upStream: ActorRef,
+ val downStream: ActorPath)
extends PersistentActor
with AtLeastOnceDelivery {
@@ -164,9 +167,10 @@ class PersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int,
}
}
-class PersistAsyncPersistentActorWithAtLeastOnceDelivery(respondAfter: Int,
- val upStream: ActorRef,
- val downStream: ActorPath)
+class PersistAsyncPersistentActorWithAtLeastOnceDelivery(
+ respondAfter: Int,
+ val upStream: ActorRef,
+ val downStream: ActorPath)
extends PersistentActor
with AtLeastOnceDelivery {
diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala
index b301b0c058..b92435f87b 100644
--- a/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala
@@ -104,8 +104,9 @@ class CodecBenchmark {
val settings = ActorMaterializerSettings(system)
materializer = ActorMaterializer(settings)(system)
- uniqueLocalAddress = UniqueAddress(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress,
- AddressUidExtension(system).longAddressUid)
+ uniqueLocalAddress = UniqueAddress(
+ system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress,
+ AddressUidExtension(system).longAddressUid)
val actorOnSystemA = system.actorOf(Props.empty, "a")
senderStringA = actorOnSystemA.path.toSerializationFormatWithAddress(uniqueLocalAddress.address)
@@ -136,24 +137,26 @@ class CodecBenchmark {
// Now build up the graphs
val encoder: Flow[OutboundEnvelope, EnvelopeBuffer, Encoder.OutboundCompressionAccess] =
Flow.fromGraph(
- new Encoder(uniqueLocalAddress,
- system.asInstanceOf[ExtendedActorSystem],
- outboundEnvelopePool,
- envelopePool,
- streamId = 1,
- debugLogSend = false,
- version = ArteryTransport.HighestVersion))
+ new Encoder(
+ uniqueLocalAddress,
+ system.asInstanceOf[ExtendedActorSystem],
+ outboundEnvelopePool,
+ envelopePool,
+ streamId = 1,
+ debugLogSend = false,
+ version = ArteryTransport.HighestVersion))
val encoderInput: Flow[String, OutboundEnvelope, NotUsed] =
Flow[String].map(msg => outboundEnvelopePool.acquire().init(OptionVal.None, payload, OptionVal.Some(remoteRefB)))
val compressions = new InboundCompressionsImpl(system, inboundContext, inboundContext.settings.Advanced.Compression)
val decoder: Flow[EnvelopeBuffer, InboundEnvelope, InboundCompressionAccess] =
Flow.fromGraph(
- new Decoder(inboundContext,
- system.asInstanceOf[ExtendedActorSystem],
- uniqueLocalAddress,
- inboundContext.settings,
- compressions,
- inboundEnvelopePool))
+ new Decoder(
+ inboundContext,
+ system.asInstanceOf[ExtendedActorSystem],
+ uniqueLocalAddress,
+ inboundContext.settings,
+ compressions,
+ inboundEnvelopePool))
val deserializer: Flow[InboundEnvelope, InboundEnvelope, NotUsed] =
Flow.fromGraph(new Deserializer(inboundContext, system.asInstanceOf[ExtendedActorSystem], envelopePool))
val decoderInput: Flow[String, EnvelopeBuffer, NotUsed] = Flow[String].map { _ =>
@@ -295,18 +298,20 @@ object CodecBenchmark {
override def identifier: Byte = 7 // Lucky number slevin
- override def remoteWriteMetadata(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- buffer: ByteBuffer): Unit = {
+ override def remoteWriteMetadata(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ buffer: ByteBuffer): Unit = {
buffer.putInt(Metadata.length)
buffer.put(Metadata)
}
- override def remoteReadMetadata(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- buffer: ByteBuffer): Unit = {
+ override def remoteReadMetadata(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ buffer: ByteBuffer): Unit = {
val length = Metadata.length
val metaLength = buffer.getInt
@tailrec
@@ -319,16 +324,18 @@ object CodecBenchmark {
throw new IOException(s"DummyInstrument deserialization error. Expected ${Metadata.toString}")
}
- override def remoteMessageSent(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- size: Int,
- time: Long): Unit = ()
+ override def remoteMessageSent(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ size: Int,
+ time: Long): Unit = ()
- override def remoteMessageReceived(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- size: Int,
- time: Long): Unit = ()
+ override def remoteMessageReceived(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ size: Int,
+ time: Long): Unit = ()
}
}
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
index a4606cdc63..5f38a07d45 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
@@ -56,18 +56,19 @@ object InterpreterBenchmark {
override val out: akka.stream.Outlet[T] = Outlet[T]("out")
out.id = 0
- setHandler(out,
- new OutHandler {
- override def onPull(): Unit = {
- if (idx < data.size) {
- push(out, data(idx))
- idx += 1
- } else {
- completeStage()
- }
- }
- override def onDownstreamFinish(): Unit = completeStage()
- })
+ setHandler(
+ out,
+ new OutHandler {
+ override def onPull(): Unit = {
+ if (idx < data.size) {
+ push(out, data(idx))
+ idx += 1
+ } else {
+ completeStage()
+ }
+ }
+ override def onDownstreamFinish(): Unit = completeStage()
+ })
}
case class GraphDataSink[T](override val toString: String, var expected: Int)
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala
index 72475fd945..eeb7f08fc6 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala
@@ -68,9 +68,10 @@ class PartitionHubBenchmark {
val latch = new CountDownLatch(NumberOfStreams)
val source = testSource.runWith(
- PartitionHub.sink[java.lang.Integer]((size, elem) => elem.intValue % NumberOfStreams,
- startAfterNrOfConsumers = NumberOfStreams,
- bufferSize = BufferSize))(materializer)
+ PartitionHub.sink[java.lang.Integer](
+ (size, elem) => elem.intValue % NumberOfStreams,
+ startAfterNrOfConsumers = NumberOfStreams,
+ bufferSize = BufferSize))(materializer)
for (_ <- 0 until NumberOfStreams)
source.runWith(new LatchSink(N / NumberOfStreams, latch))(materializer)
diff --git a/akka-camel/src/main/scala/akka/camel/Camel.scala b/akka-camel/src/main/scala/akka/camel/Camel.scala
index 3adf147507..c9aeaf0ca4 100644
--- a/akka-camel/src/main/scala/akka/camel/Camel.scala
+++ b/akka-camel/src/main/scala/akka/camel/Camel.scala
@@ -98,14 +98,15 @@ class CamelSettings private[camel] (config: Config, dynamicAccess: DynamicAccess
}
val conversions = specifiedConversions.foldLeft(Map[String, Class[_ <: AnyRef]]()) {
case (m, (key, fqcn)) =>
- m.updated(key,
- dynamicAccess
- .getClassFor[AnyRef](fqcn)
- .recover {
- case e =>
- throw new ConfigurationException("Could not find/load Camel Converter class [" + fqcn + "]", e)
- }
- .get)
+ m.updated(
+ key,
+ dynamicAccess
+ .getClassFor[AnyRef](fqcn)
+ .recover {
+ case e =>
+ throw new ConfigurationException("Could not find/load Camel Converter class [" + fqcn + "]", e)
+ }
+ .get)
}
(s: String, r: RouteDefinition) => conversions.get(s).fold(r)(r.convertBodyTo)
diff --git a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala
index e27833b12b..03daff459d 100644
--- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala
+++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala
@@ -271,9 +271,10 @@ object CamelMessage extends ((Any, Map[String, Any]) => CamelMessage) {
* in the Camel message.
*/
private[camel] def from(camelMessage: JCamelMessage, headers: Map[String, Any]): CamelMessage =
- CamelMessage(camelMessage.getBody,
- headers ++ camelMessage.getHeaders.asScala,
- camelMessage.getAttachments.asScala.toMap)
+ CamelMessage(
+ camelMessage.getBody,
+ headers ++ camelMessage.getHeaders.asScala,
+ camelMessage.getAttachments.asScala.toMap)
/**
* Creates a new CamelMessageWithAttachments object from the Camel message.
@@ -283,12 +284,14 @@ object CamelMessage extends ((Any, Map[String, Any]) => CamelMessage) {
* @param attachments additional attachments to set on the created CamelMessageWithAttachments in addition to those
* in the Camel message.
*/
- private[camel] def from(camelMessage: JCamelMessage,
- headers: Map[String, Any],
- attachments: Map[String, DataHandler]): CamelMessage =
- CamelMessage(camelMessage.getBody,
- headers ++ camelMessage.getHeaders.asScala,
- attachments ++ camelMessage.getAttachments.asScala)
+ private[camel] def from(
+ camelMessage: JCamelMessage,
+ headers: Map[String, Any],
+ attachments: Map[String, DataHandler]): CamelMessage =
+ CamelMessage(
+ camelMessage.getBody,
+ headers ++ camelMessage.getHeaders.asScala,
+ attachments ++ camelMessage.getAttachments.asScala)
/**
* INTERNAL API
diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala
index 269327b26b..b5f556773c 100644
--- a/akka-camel/src/main/scala/akka/camel/Consumer.scala
+++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala
@@ -38,9 +38,10 @@ trait Consumer extends Actor with CamelSupport {
}
private[this] def register(): Unit = {
- camel.supervisor ! Register(self,
- endpointUri,
- Some(ConsumerConfig(activationTimeout, replyTimeout, autoAck, onRouteDefinition)))
+ camel.supervisor ! Register(
+ self,
+ endpointUri,
+ Some(ConsumerConfig(activationTimeout, replyTimeout, autoAck, onRouteDefinition)))
}
/**
@@ -98,17 +99,19 @@ private[camel] object Consumer {
*
* Was a case class but has been split up as a workaround for SI-8283
*/
-private[camel] class ConsumerConfig(val activationTimeout: FiniteDuration,
- val replyTimeout: FiniteDuration,
- val autoAck: Boolean,
- val onRouteDefinition: RouteDefinition => ProcessorDefinition[_])
+private[camel] class ConsumerConfig(
+ val activationTimeout: FiniteDuration,
+ val replyTimeout: FiniteDuration,
+ val autoAck: Boolean,
+ val onRouteDefinition: RouteDefinition => ProcessorDefinition[_])
extends NoSerializationVerificationNeeded
with scala.Serializable
private[camel] object ConsumerConfig {
- def apply(activationTimeout: FiniteDuration,
- replyTimeout: FiniteDuration,
- autoAck: Boolean,
- onRouteDefinition: RouteDefinition => ProcessorDefinition[_]): ConsumerConfig =
+ def apply(
+ activationTimeout: FiniteDuration,
+ replyTimeout: FiniteDuration,
+ autoAck: Boolean,
+ onRouteDefinition: RouteDefinition => ProcessorDefinition[_]): ConsumerConfig =
new ConsumerConfig(activationTimeout, replyTimeout, autoAck, onRouteDefinition)
}
diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala
index e6078dacee..299a896501 100644
--- a/akka-camel/src/main/scala/akka/camel/Producer.scala
+++ b/akka-camel/src/main/scala/akka/camel/Producer.scala
@@ -136,15 +136,16 @@ trait ProducerSupport extends Actor with CamelSupport {
val cmsg = CamelMessage.canonicalize(msg)
xchg.setRequest(cmsg)
- processor.process(xchg.exchange,
- new AsyncCallback {
- // Ignoring doneSync, sending back async uniformly.
- def done(doneSync: Boolean): Unit =
- producer.tell(
- if (xchg.exchange.isFailed) xchg.toFailureResult(cmsg.headers(headersToCopy))
- else MessageResult(xchg.toResponseMessage(cmsg.headers(headersToCopy))),
- originalSender)
- })
+ processor.process(
+ xchg.exchange,
+ new AsyncCallback {
+ // Ignoring doneSync, sending back async uniformly.
+ def done(doneSync: Boolean): Unit =
+ producer.tell(
+ if (xchg.exchange.isFailed) xchg.toFailureResult(cmsg.headers(headersToCopy))
+ else MessageResult(xchg.toResponseMessage(cmsg.headers(headersToCopy))),
+ originalSender)
+ })
}
}
}
diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala
index 0acdb8827d..e18ef47c53 100644
--- a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala
+++ b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala
@@ -104,10 +104,11 @@ private[camel] class Registry(activationTracker: ActorRef) extends Actor with Ca
private var consumers = Set[ActorRef]()
class RegistryLogStrategy()(_decider: SupervisorStrategy.Decider) extends OneForOneStrategy()(_decider) {
- override def logFailure(context: ActorContext,
- child: ActorRef,
- cause: Throwable,
- decision: SupervisorStrategy.Directive): Unit =
+ override def logFailure(
+ context: ActorContext,
+ child: ActorRef,
+ cause: Throwable,
+ decision: SupervisorStrategy.Directive): Unit =
cause match {
case _: ActorActivationException | _: ActorDeActivationException =>
try context.system.eventStream.publish {
diff --git a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala
index 9b5688a140..0fe6f0a6ec 100644
--- a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala
+++ b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala
@@ -20,18 +20,20 @@ import scala.language.existentials
*
*
*/
-private[camel] class ConsumerActorRouteBuilder(endpointUri: String,
- consumer: ActorRef,
- config: ConsumerConfig,
- settings: CamelSettings)
+private[camel] class ConsumerActorRouteBuilder(
+ endpointUri: String,
+ consumer: ActorRef,
+ config: ConsumerConfig,
+ settings: CamelSettings)
extends RouteBuilder {
protected def targetActorUri = CamelPath.toUri(consumer, config.autoAck, config.replyTimeout)
def configure(): Unit =
applyUserRouteCustomization(
- settings.Conversions.apply(endpointUri.take(endpointUri.indexOf(":")), // e.g. "http" from "http://whatever/..."
- from(endpointUri).routeId(consumer.path.toString))).to(targetActorUri)
+ settings.Conversions.apply(
+ endpointUri.take(endpointUri.indexOf(":")), // e.g. "http" from "http://whatever/..."
+ from(endpointUri).routeId(consumer.path.toString))).to(targetActorUri)
def applyUserRouteCustomization(rd: RouteDefinition) = config.onRouteDefinition(rd)
}
diff --git a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala
index 0a04aa32e2..4b183dbc44 100644
--- a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala
+++ b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala
@@ -98,8 +98,8 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel
* @param endpoint the endpoint to be deactivated
* @param timeout the timeout of the Future
*/
- def deactivationFutureFor(endpoint: ActorRef)(implicit timeout: Timeout,
- executor: ExecutionContext): Future[ActorRef] =
+ def deactivationFutureFor(
+ endpoint: ActorRef)(implicit timeout: Timeout, executor: ExecutionContext): Future[ActorRef] =
(supervisor
.ask(AwaitDeActivation(endpoint))(timeout))
.map[ActorRef]({
diff --git a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala
index 0e1d40bedb..2cc3fe3d5d 100644
--- a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala
@@ -95,8 +95,9 @@ class ConsumerBroadcast(promise: Promise[(Future[List[List[ActorRef]]], Future[L
allActivationFutures = allActivationFutures :+ activationListFuture
allDeactivationFutures = allDeactivationFutures :+ deactivationListFuture
val routee =
- context.actorOf(Props(classOf[Registrar], i, number, activationListPromise, deactivationListPromise),
- "registrar-" + i)
+ context.actorOf(
+ Props(classOf[Registrar], i, number, activationListPromise, deactivationListPromise),
+ "registrar-" + i)
routee.path.toString
}
promise.success(Future.sequence(allActivationFutures) -> Future.sequence(allDeactivationFutures))
@@ -113,10 +114,11 @@ final case class DeRegisterConsumersAndProducers()
final case class Activations()
final case class DeActivations()
-class Registrar(val start: Int,
- val number: Int,
- activationsPromise: Promise[List[ActorRef]],
- deActivationsPromise: Promise[List[ActorRef]])
+class Registrar(
+ val start: Int,
+ val number: Int,
+ activationsPromise: Promise[List[ActorRef]],
+ deActivationsPromise: Promise[List[ActorRef]])
extends Actor
with ActorLogging {
private var actorRefs = Set[ActorRef]()
diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala
index 79e5252005..54bd6d3142 100644
--- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala
@@ -94,8 +94,9 @@ class ProducerFeatureTest
}
"03 produce a message oneway" in {
- val producer = system.actorOf(Props(new TestProducer("direct:producer-test-1", true) with Oneway),
- name = "03-direct-producer-1-oneway")
+ val producer = system.actorOf(
+ Props(new TestProducer("direct:producer-test-1", true) with Oneway),
+ name = "03-direct-producer-1-oneway")
mockEndpoint.expectedBodiesReceived("TEST")
producer ! CamelMessage("test", Map())
mockEndpoint.assertIsSatisfied()
@@ -104,8 +105,9 @@ class ProducerFeatureTest
"04 produces message twoway without sender reference" in {
// this test causes a dead letter which can be ignored. The producer is two-way but a oneway tell is used
// to communicate with it and the response is ignored, which ends up in a dead letter
- val producer = system.actorOf(Props(new TestProducer("direct:producer-test-1")),
- name = "04-ignore-this-deadletter-direct-producer-test-no-sender")
+ val producer = system.actorOf(
+ Props(new TestProducer("direct:producer-test-1")),
+ name = "04-ignore-this-deadletter-direct-producer-test-no-sender")
mockEndpoint.expectedBodiesReceived("test")
producer ! CamelMessage("test", Map())
mockEndpoint.assertIsSatisfied()
@@ -123,8 +125,9 @@ class ProducerFeatureTest
}
"11 produce message to direct:producer-test-3 and receive failure response" in {
- val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")),
- name = "11-direct-producer-test-3-receive-failure")
+ val producer = system.actorOf(
+ Props(new TestProducer("direct:producer-test-3")),
+ name = "11-direct-producer-test-3-receive-failure")
val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
@@ -139,8 +142,9 @@ class ProducerFeatureTest
"12 produce message, forward normal response of direct:producer-test-2 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "12-reply-forwarding-target")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)),
- name = "12-direct-producer-test-2-forwarder")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-2", target)),
+ name = "12-direct-producer-test-2-forwarder")
val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123"))
producer.tell(message, testActor)
expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result")))
@@ -148,8 +152,9 @@ class ProducerFeatureTest
"13 produce message, forward failure response of direct:producer-test-2 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "13-reply-forwarding-target")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)),
- name = "13-direct-producer-test-2-forwarder-failure")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-2", target)),
+ name = "13-direct-producer-test-2-forwarder-failure")
val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
@@ -164,8 +169,9 @@ class ProducerFeatureTest
"14 produce message, forward normal response to a producing target actor and produce response to direct:forward-test-1" in {
val target = system.actorOf(Props[ProducingForwardTarget], name = "14-producer-forwarding-target")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)),
- name = "14-direct-producer-test-2-forwarder-to-producing-target")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-2", target)),
+ name = "14-direct-producer-test-2-forwarder-to-producing-target")
mockEndpoint.expectedBodiesReceived("received test")
producer.tell(CamelMessage("test", Map()), producer)
mockEndpoint.assertIsSatisfied()
@@ -173,8 +179,9 @@ class ProducerFeatureTest
"15 produce message, forward failure response to a producing target actor and produce response to direct:forward-test-1" in {
val target = system.actorOf(Props[ProducingForwardTarget], name = "15-producer-forwarding-target-failure")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)),
- name = "15-direct-producer-test-2-forward-failure")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-2", target)),
+ name = "15-direct-producer-test-2-forward-failure")
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
mockEndpoint.expectedMessageCount(1)
mockEndpoint.message(0).body().isInstanceOf(classOf[akka.actor.Status.Failure])
@@ -185,8 +192,9 @@ class ProducerFeatureTest
"16 produce message, forward normal response from direct:producer-test-3 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "16-reply-forwarding-target")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)),
- name = "16-direct-producer-test-3-to-replying-actor")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-3", target)),
+ name = "16-direct-producer-test-3-to-replying-actor")
val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123"))
producer.tell(message, testActor)
@@ -195,8 +203,9 @@ class ProducerFeatureTest
"17 produce message, forward failure response from direct:producer-test-3 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "17-reply-forwarding-target")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)),
- name = "17-direct-producer-test-3-forward-failure")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-3", target)),
+ name = "17-direct-producer-test-3-forward-failure")
val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
@@ -211,8 +220,9 @@ class ProducerFeatureTest
"18 produce message, forward normal response from direct:producer-test-3 to a producing target actor and produce response to direct:forward-test-1" in {
val target = system.actorOf(Props[ProducingForwardTarget], "18-producing-forward-target-normal")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)),
- name = "18-direct-producer-test-3-forward-normal")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-3", target)),
+ name = "18-direct-producer-test-3-forward-normal")
mockEndpoint.expectedBodiesReceived("received test")
producer.tell(CamelMessage("test", Map()), producer)
mockEndpoint.assertIsSatisfied()
@@ -220,8 +230,9 @@ class ProducerFeatureTest
"19 produce message, forward failure response from direct:producer-test-3 to a producing target actor and produce response to direct:forward-test-1" in {
val target = system.actorOf(Props[ProducingForwardTarget], "19-producing-forward-target-failure")
- val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)),
- name = "19-direct-producer-test-3-forward-failure-producing-target")
+ val producer = system.actorOf(
+ Props(new TestForwarder("direct:producer-test-3", target)),
+ name = "19-direct-producer-test-3-forward-failure-producing-target")
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
mockEndpoint.expectedMessageCount(1)
mockEndpoint.message(0).body().isInstanceOf(classOf[akka.actor.Status.Failure])
@@ -252,8 +263,9 @@ class ProducerFeatureTest
import TestSupport._
filterEvents(EventFilter[Exception](occurrences = 1)) {
val producerSupervisor =
- system.actorOf(Props(new ProducerSupervisor(Props(new ChildProducer("mock:mock", true)))),
- "21-ignore-deadletter-sender-ref-test")
+ system.actorOf(
+ Props(new ProducerSupervisor(Props(new ChildProducer("mock:mock", true)))),
+ "21-ignore-deadletter-sender-ref-test")
mockEndpoint.reset()
producerSupervisor.tell(CamelMessage("test", Map()), testActor)
producerSupervisor.tell(CamelMessage("err", Map()), testActor)
diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala
index 705186cefe..fbc5b9778d 100644
--- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala
@@ -21,10 +21,11 @@ class ActorComponentConfigurationTest extends WordSpec with Matchers with Shared
.createEndpoint(s"akka://test/user/$$a?autoAck=false&replyTimeout=987000000+nanos")
.asInstanceOf[ActorEndpointConfig]
- actorEndpointConfig should have('endpointUri (s"akka://test/user/$$a?autoAck=false&replyTimeout=987000000+nanos"),
- 'path (ActorEndpointPath.fromCamelPath(s"akka://test/user/$$a")),
- 'autoAck (false),
- 'replyTimeout (987000000 nanos))
+ actorEndpointConfig should have(
+ 'endpointUri (s"akka://test/user/$$a?autoAck=false&replyTimeout=987000000+nanos"),
+ 'path (ActorEndpointPath.fromCamelPath(s"akka://test/user/$$a")),
+ 'autoAck (false),
+ 'replyTimeout (987000000 nanos))
}
}
diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala
index 3921e8459e..5bb716c561 100644
--- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala
@@ -360,7 +360,8 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft
override val log = mock[MarkerLoggingAdapter]
override lazy val template = mock[ProducerTemplate]
override lazy val context = mock[DefaultCamelContext]
- override val settings = new CamelSettings(ConfigFactory.parseString("""
+ override val settings = new CamelSettings(
+ ConfigFactory.parseString("""
akka {
camel {
jmx = off
@@ -373,7 +374,7 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft
}
}
""").withFallback(config),
- sys.dynamicAccess)
+ sys.dynamicAccess)
}
camel = camelWithMocks
@@ -391,10 +392,11 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft
def msg(s: String) = CamelMessage(s, Map.empty)
- def given(actor: ActorRef = probe.ref,
- outCapable: Boolean = true,
- autoAck: Boolean = true,
- replyTimeout: FiniteDuration = 20 seconds) = {
+ def given(
+ actor: ActorRef = probe.ref,
+ outCapable: Boolean = true,
+ autoAck: Boolean = true,
+ replyTimeout: FiniteDuration = 20 seconds) = {
prepareMocks(actor, outCapable = outCapable)
new ActorProducer(configure(isAutoAck = autoAck, _replyTimeout = replyTimeout), camel)
}
@@ -425,9 +427,10 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft
}
- def configure(endpointUri: String = "test-uri",
- isAutoAck: Boolean = true,
- _replyTimeout: FiniteDuration = 20 seconds) = {
+ def configure(
+ endpointUri: String = "test-uri",
+ isAutoAck: Boolean = true,
+ _replyTimeout: FiniteDuration = 20 seconds) = {
val endpoint = new ActorEndpoint(endpointUri, actorComponent, actorEndpointPath, camel)
endpoint.autoAck = isAutoAck
endpoint.replyTimeout = _replyTimeout
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
index de92b5233c..bf4fd0a458 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
@@ -163,10 +163,11 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging {
/**
* Start periodic metrics collection
*/
- val sampleTask = scheduler.schedule(PeriodicTasksInitialDelay max CollectorSampleInterval,
- CollectorSampleInterval,
- self,
- MetricsTick)
+ val sampleTask = scheduler.schedule(
+ PeriodicTasksInitialDelay max CollectorSampleInterval,
+ CollectorSampleInterval,
+ self,
+ MetricsTick)
override def preStart(): Unit = {
cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
index 6c3ff2c791..3e6c506efb 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
@@ -45,8 +45,9 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension {
* Supervision strategy.
*/
private[metrics] val strategy = system.dynamicAccess
- .createInstanceFor[SupervisorStrategy](SupervisorStrategyProvider,
- immutable.Seq(classOf[Config] -> SupervisorStrategyConfiguration))
+ .createInstanceFor[SupervisorStrategy](
+ SupervisorStrategyProvider,
+ immutable.Seq(classOf[Config] -> SupervisorStrategyConfiguration))
.getOrElse {
val log: LoggingAdapter = Logging(system, getClass.getName)
log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
index 7a793739fa..fcfa6ef5d3 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
@@ -34,8 +34,9 @@ import akka.cluster.routing.ClusterRouterSettingsBase
* @param metricsSelector decides what probability to use for selecting a routee, based
* on remaining capacity as indicated by the node metrics
*/
-final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem,
- metricsSelector: MetricsSelector = MixMetricsSelector)
+final case class AdaptiveLoadBalancingRoutingLogic(
+ system: ActorSystem,
+ metricsSelector: MetricsSelector = MixMetricsSelector)
extends RoutingLogic
with NoSerializationVerificationNeeded {
@@ -122,18 +123,19 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem,
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
-final case class AdaptiveLoadBalancingPool(metricsSelector: MetricsSelector = MixMetricsSelector,
- val nrOfInstances: Int = 0,
- override val supervisorStrategy: SupervisorStrategy =
- Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+final case class AdaptiveLoadBalancingPool(
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ val nrOfInstances: Int = 0,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool {
def this(config: Config, dynamicAccess: DynamicAccess) =
- this(nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
- metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
- usePoolDispatcher = config.hasPath("pool-dispatcher"))
+ this(
+ nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
+ metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
/**
* Java API
@@ -152,8 +154,9 @@ final case class AdaptiveLoadBalancingPool(metricsSelector: MetricsSelector = Mi
override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
Some(
- Props(classOf[AdaptiveLoadBalancingMetricsListener],
- routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
+ Props(
+ classOf[AdaptiveLoadBalancingMetricsListener],
+ routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
/**
* Setting the supervisor strategy to be used for the “head” Router actor.
@@ -205,14 +208,16 @@ final case class AdaptiveLoadBalancingPool(metricsSelector: MetricsSelector = Mi
* router management messages
*/
@SerialVersionUID(1L)
-final case class AdaptiveLoadBalancingGroup(metricsSelector: MetricsSelector = MixMetricsSelector,
- val paths: immutable.Iterable[String] = Nil,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+final case class AdaptiveLoadBalancingGroup(
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ val paths: immutable.Iterable[String] = Nil,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config, dynamicAccess: DynamicAccess) =
- this(metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
- paths = immutableSeq(config.getStringList("routees.paths")))
+ this(
+ metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
+ paths = immutableSeq(config.getStringList("routees.paths")))
/**
* Java API
@@ -231,8 +236,9 @@ final case class AdaptiveLoadBalancingGroup(metricsSelector: MetricsSelector = M
override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
Some(
- Props(classOf[AdaptiveLoadBalancingMetricsListener],
- routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
+ Props(
+ classOf[AdaptiveLoadBalancingMetricsListener],
+ routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
/**
* Setting the dispatcher to be used for the router head actor, which handles
@@ -397,10 +403,11 @@ object MetricsSelector {
.createInstanceFor[MetricsSelector](fqn, args)
.recover({
case exception =>
- throw new IllegalArgumentException((s"Cannot instantiate metrics-selector [$fqn], " +
- "make sure it extends [akka.cluster.routing.MetricsSelector] and " +
- "has constructor with [com.typesafe.config.Config] parameter"),
- exception)
+ throw new IllegalArgumentException(
+ (s"Cannot instantiate metrics-selector [$fqn], " +
+ "make sure it extends [akka.cluster.routing.MetricsSelector] and " +
+ "has constructor with [com.typesafe.config.Config] parameter"),
+ exception)
})
.get
}
@@ -463,9 +470,10 @@ abstract class CapacityMetricsSelector extends MetricsSelector {
*
* Pick routee based on its weight. Higher weight, higher probability.
*/
-private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee],
- selfAddress: Address,
- weights: Map[Address, Int]) {
+private[metrics] class WeightedRoutees(
+ routees: immutable.IndexedSeq[Routee],
+ selfAddress: Address,
+ weights: Map[Address, Int]) {
// fill an array of same size as the refs with accumulated weights,
// binarySearch is used to pick the right bucket from a requested value
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
index 313ce794b3..340e5b6aae 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
@@ -130,11 +130,12 @@ object StandardMetrics {
used <- nodeMetrics.metric(HeapMemoryUsed)
committed <- nodeMetrics.metric(HeapMemoryCommitted)
} yield
- (nodeMetrics.address,
- nodeMetrics.timestamp,
- used.smoothValue.longValue,
- committed.smoothValue.longValue,
- nodeMetrics.metric(HeapMemoryMax).map(_.smoothValue.longValue))
+ (
+ nodeMetrics.address,
+ nodeMetrics.timestamp,
+ used.smoothValue.longValue,
+ committed.smoothValue.longValue,
+ nodeMetrics.metric(HeapMemoryMax).map(_.smoothValue.longValue))
}
}
@@ -181,12 +182,13 @@ object StandardMetrics {
for {
processors <- nodeMetrics.metric(Processors)
} yield
- (nodeMetrics.address,
- nodeMetrics.timestamp,
- nodeMetrics.metric(SystemLoadAverage).map(_.smoothValue),
- nodeMetrics.metric(CpuCombined).map(_.smoothValue),
- nodeMetrics.metric(CpuStolen).map(_.smoothValue),
- processors.value.intValue)
+ (
+ nodeMetrics.address,
+ nodeMetrics.timestamp,
+ nodeMetrics.metric(SystemLoadAverage).map(_.smoothValue),
+ nodeMetrics.metric(CpuCombined).map(_.smoothValue),
+ nodeMetrics.metric(CpuStolen).map(_.smoothValue),
+ processors.value.intValue)
}
}
@@ -214,12 +216,13 @@ object StandardMetrics {
* @param processors the number of available processors
*/
@SerialVersionUID(1L)
- final case class Cpu(address: Address,
- timestamp: Long,
- systemLoadAverage: Option[Double],
- cpuCombined: Option[Double],
- cpuStolen: Option[Double],
- processors: Int) {
+ final case class Cpu(
+ address: Address,
+ timestamp: Long,
+ systemLoadAverage: Option[Double],
+ cpuCombined: Option[Double],
+ cpuStolen: Option[Double],
+ processors: Int) {
cpuCombined match {
case Some(x) => require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]")
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
index 300ed20321..2f72b4a39e 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
@@ -249,8 +249,9 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS
case NumberType.Float_VALUE => jl.Float.intBitsToFloat(number.getValue32)
case NumberType.Integer_VALUE => number.getValue32
case NumberType.Serialized_VALUE =>
- val in = new ClassLoaderObjectInputStream(system.dynamicAccess.classLoader,
- new ByteArrayInputStream(number.getSerialized.toByteArray))
+ val in = new ClassLoaderObjectInputStream(
+ system.dynamicAccess.classLoader,
+ new ByteArrayInputStream(number.getSerialized.toByteArray))
val obj = in.readObject
in.close()
obj.asInstanceOf[jl.Number]
@@ -258,14 +259,16 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS
}
def metricFromProto(metric: cm.NodeMetrics.Metric): Metric =
- Metric(metricNameMapping(metric.getNameIndex),
- numberFromProto(metric.getNumber),
- if (metric.hasEwma) ewmaFromProto(metric.getEwma) else None)
+ Metric(
+ metricNameMapping(metric.getNameIndex),
+ numberFromProto(metric.getNumber),
+ if (metric.hasEwma) ewmaFromProto(metric.getEwma) else None)
def nodeMetricsFromProto(nodeMetrics: cm.NodeMetrics): NodeMetrics =
- NodeMetrics(addressMapping(nodeMetrics.getAddressIndex),
- nodeMetrics.getTimestamp,
- nodeMetrics.getMetricsList.asScala.iterator.map(metricFromProto).to(immutable.Set))
+ NodeMetrics(
+ addressMapping(nodeMetrics.getAddressIndex),
+ nodeMetrics.getTimestamp,
+ nodeMetrics.getMetricsList.asScala.iterator.map(metricFromProto).to(immutable.Set))
val nodeMetrics: Set[NodeMetrics] =
mgossip.getNodeMetricsList.asScala.iterator.map(nodeMetricsFromProto).to(immutable.Set)
@@ -285,12 +288,13 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS
.asInstanceOf[MetricsSelector]
} else MixMetricsSelector
- AdaptiveLoadBalancingPool(metricsSelector = selector,
- nrOfInstances = alb.getNrOfInstances,
- routerDispatcher =
- if (alb.hasRouterDispatcher) alb.getRouterDispatcher
- else Dispatchers.DefaultDispatcherId,
- usePoolDispatcher = alb.getUsePoolDispatcher)
+ AdaptiveLoadBalancingPool(
+ metricsSelector = selector,
+ nrOfInstances = alb.getNrOfInstances,
+ routerDispatcher =
+ if (alb.hasRouterDispatcher) alb.getRouterDispatcher
+ else Dispatchers.DefaultDispatcherId,
+ usePoolDispatcher = alb.getUsePoolDispatcher)
}
def mixMetricSelectorFromBinary(bytes: Array[Byte]): MixMetricsSelector = {
diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
index 241f82b473..fc15d0100e 100644
--- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
+++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
@@ -49,20 +49,22 @@ trait ClusterMetricsCommonConfig extends MultiNodeConfig {
object ClusterMetricsDisabledConfig extends ClusterMetricsCommonConfig {
commonConfig {
- Seq(customLogging,
- disableMetricsExtension,
- debugConfig(on = false),
- MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet).reduceLeft(_.withFallback(_))
+ Seq(
+ customLogging,
+ disableMetricsExtension,
+ debugConfig(on = false),
+ MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet).reduceLeft(_.withFallback(_))
}
}
object ClusterMetricsEnabledConfig extends ClusterMetricsCommonConfig {
commonConfig {
- Seq(customLogging,
- enableMetricsExtension,
- debugConfig(on = false),
- MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet).reduceLeft(_.withFallback(_))
+ Seq(
+ customLogging,
+ enableMetricsExtension,
+ debugConfig(on = false),
+ MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet).reduceLeft(_.withFallback(_))
}
}
diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
index e0b705e35e..d41e3423a3 100644
--- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
+++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
@@ -143,10 +143,10 @@ abstract class AdaptiveLoadBalancingRouterSpec
def startRouter(name: String): ActorRef = {
val router = system.actorOf(
- ClusterRouterPool(local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
- settings = ClusterRouterPoolSettings(totalInstances = 10,
- maxInstancesPerNode = 1,
- allowLocalRoutees = true)).props(Props[Echo]),
+ ClusterRouterPool(
+ local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
+ settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true))
+ .props(Props[Echo]),
name)
// it may take some time until router receives cluster member events
awaitAssert { currentRoutees(router).size should ===(roles.size) }
diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala
index 64bd5b861a..31fd927cbe 100644
--- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala
+++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala
@@ -55,11 +55,13 @@ abstract class StatsService2 extends Actor {
import akka.routing.ConsistentHashingGroup
val workerRouter = context.actorOf(
- ClusterRouterGroup(ConsistentHashingGroup(Nil),
- ClusterRouterGroupSettings(totalInstances = 100,
- routeesPaths = List("/user/statsWorker"),
- allowLocalRoutees = true,
- useRoles = Set("compute"))).props(),
+ ClusterRouterGroup(
+ ConsistentHashingGroup(Nil),
+ ClusterRouterGroupSettings(
+ totalInstances = 100,
+ routeesPaths = List("/user/statsWorker"),
+ allowLocalRoutees = true,
+ useRoles = Set("compute"))).props(),
name = "workerRouter2")
//#router-lookup-in-code
}
@@ -71,10 +73,10 @@ abstract class StatsService3 extends Actor {
import akka.routing.ConsistentHashingPool
val workerRouter = context.actorOf(
- ClusterRouterPool(ConsistentHashingPool(0),
- ClusterRouterPoolSettings(totalInstances = 100,
- maxInstancesPerNode = 3,
- allowLocalRoutees = false)).props(Props[StatsWorker]),
+ ClusterRouterPool(
+ ConsistentHashingPool(0),
+ ClusterRouterPoolSettings(totalInstances = 100, maxInstancesPerNode = 3, allowLocalRoutees = false))
+ .props(Props[StatsWorker]),
name = "workerRouter3")
//#router-deploy-in-code
}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
index cc387c11ad..b1e6d6bc63 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
@@ -56,24 +56,26 @@ class MetricsExtensionSpec extends AkkaSpec(MetricsConfig.clusterSigarMock) with
val history = metricsView.metricsHistory.reverse.map { _.head }
- val expected = List((0.700, 0.000, 0.000),
- (0.700, 0.018, 0.007),
- (0.700, 0.051, 0.020),
- (0.700, 0.096, 0.038),
- (0.700, 0.151, 0.060),
- (0.700, 0.214, 0.085),
- (0.700, 0.266, 0.106),
- (0.700, 0.309, 0.123),
- (0.700, 0.343, 0.137),
- (0.700, 0.372, 0.148))
+ val expected = List(
+ (0.700, 0.000, 0.000),
+ (0.700, 0.018, 0.007),
+ (0.700, 0.051, 0.020),
+ (0.700, 0.096, 0.038),
+ (0.700, 0.151, 0.060),
+ (0.700, 0.214, 0.085),
+ (0.700, 0.266, 0.106),
+ (0.700, 0.309, 0.123),
+ (0.700, 0.343, 0.137),
+ (0.700, 0.372, 0.148))
expected.size should ===(sampleCount)
history.zip(expected).foreach {
case (mockMetrics, expectedData) =>
(mockMetrics, expectedData) match {
- case (Cpu(_, _, loadAverageMock, cpuCombinedMock, cpuStolenMock, _),
- (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) =>
+ case (
+ Cpu(_, _, loadAverageMock, cpuCombinedMock, cpuStolenMock, _),
+ (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) =>
loadAverageMock.get should ===(loadAverageEwma +- epsilon)
cpuCombinedMock.get should ===(cpuCombinedEwma +- epsilon)
cpuStolenMock.get should ===(cpuStolenEwma +- epsilon)
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
index fd126af20f..955c36e825 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
@@ -25,42 +25,50 @@ class MetricsSelectorSpec extends WordSpec with Matchers {
val decayFactor = Some(0.18)
- val nodeMetricsA = NodeMetrics(a1,
- System.currentTimeMillis,
- Set(Metric.create(HeapMemoryUsed, 128, decayFactor),
- Metric.create(HeapMemoryCommitted, 256, decayFactor),
- Metric.create(HeapMemoryMax, 512, None),
- Metric.create(CpuCombined, 0.2, decayFactor),
- Metric.create(CpuStolen, 0.1, decayFactor),
- Metric.create(SystemLoadAverage, 0.5, None),
- Metric.create(Processors, 8, None)).flatten)
+ val nodeMetricsA = NodeMetrics(
+ a1,
+ System.currentTimeMillis,
+ Set(
+ Metric.create(HeapMemoryUsed, 128, decayFactor),
+ Metric.create(HeapMemoryCommitted, 256, decayFactor),
+ Metric.create(HeapMemoryMax, 512, None),
+ Metric.create(CpuCombined, 0.2, decayFactor),
+ Metric.create(CpuStolen, 0.1, decayFactor),
+ Metric.create(SystemLoadAverage, 0.5, None),
+ Metric.create(Processors, 8, None)).flatten)
- val nodeMetricsB = NodeMetrics(b1,
- System.currentTimeMillis,
- Set(Metric.create(HeapMemoryUsed, 256, decayFactor),
- Metric.create(HeapMemoryCommitted, 512, decayFactor),
- Metric.create(HeapMemoryMax, 1024, None),
- Metric.create(CpuCombined, 0.4, decayFactor),
- Metric.create(CpuStolen, 0.2, decayFactor),
- Metric.create(SystemLoadAverage, 1.0, None),
- Metric.create(Processors, 16, None)).flatten)
+ val nodeMetricsB = NodeMetrics(
+ b1,
+ System.currentTimeMillis,
+ Set(
+ Metric.create(HeapMemoryUsed, 256, decayFactor),
+ Metric.create(HeapMemoryCommitted, 512, decayFactor),
+ Metric.create(HeapMemoryMax, 1024, None),
+ Metric.create(CpuCombined, 0.4, decayFactor),
+ Metric.create(CpuStolen, 0.2, decayFactor),
+ Metric.create(SystemLoadAverage, 1.0, None),
+ Metric.create(Processors, 16, None)).flatten)
- val nodeMetricsC = NodeMetrics(c1,
- System.currentTimeMillis,
- Set(Metric.create(HeapMemoryUsed, 1024, decayFactor),
- Metric.create(HeapMemoryCommitted, 1024, decayFactor),
- Metric.create(HeapMemoryMax, 1024, None),
- Metric.create(CpuCombined, 0.6, decayFactor),
- Metric.create(CpuStolen, 0.3, decayFactor),
- Metric.create(SystemLoadAverage, 16.0, None),
- Metric.create(Processors, 16, None)).flatten)
+ val nodeMetricsC = NodeMetrics(
+ c1,
+ System.currentTimeMillis,
+ Set(
+ Metric.create(HeapMemoryUsed, 1024, decayFactor),
+ Metric.create(HeapMemoryCommitted, 1024, decayFactor),
+ Metric.create(HeapMemoryMax, 1024, None),
+ Metric.create(CpuCombined, 0.6, decayFactor),
+ Metric.create(CpuStolen, 0.3, decayFactor),
+ Metric.create(SystemLoadAverage, 16.0, None),
+ Metric.create(Processors, 16, None)).flatten)
- val nodeMetricsD = NodeMetrics(d1,
- System.currentTimeMillis,
- Set(Metric.create(HeapMemoryUsed, 511, decayFactor),
- Metric.create(HeapMemoryCommitted, 512, decayFactor),
- Metric.create(HeapMemoryMax, 512, None),
- Metric.create(Processors, 2, decayFactor)).flatten)
+ val nodeMetricsD = NodeMetrics(
+ d1,
+ System.currentTimeMillis,
+ Set(
+ Metric.create(HeapMemoryUsed, 511, decayFactor),
+ Metric.create(HeapMemoryCommitted, 512, decayFactor),
+ Metric.create(HeapMemoryMax, 512, None),
+ Metric.create(Processors, 2, decayFactor)).flatten)
val nodeMetrics = Set(nodeMetricsA, nodeMetricsB, nodeMetricsC, nodeMetricsD)
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
index 04c9dc8609..9e8691aba0 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
@@ -51,11 +51,12 @@ case class SimpleSigarProvider(location: String = "native") extends SigarProvide
/**
* Provide sigar library as static mock.
*/
-case class MockitoSigarProvider(pid: Long = 123,
- loadAverage: Array[Double] = Array(0.7, 0.3, 0.1),
- cpuCombined: Double = 0.5,
- cpuStolen: Double = 0.2,
- steps: Int = 5)
+case class MockitoSigarProvider(
+ pid: Long = 123,
+ loadAverage: Array[Double] = Array(0.7, 0.3, 0.1),
+ cpuCombined: Double = 0.5,
+ cpuStolen: Double = 0.2,
+ steps: Int = 5)
extends SigarProvider
with MockitoSugar {
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
index fdb474e464..c2668662d4 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
@@ -43,15 +43,18 @@ class MessageSerializerSpec extends AkkaSpec("""
"be serializable" in {
val metricsGossip = MetricsGossip(
- Set(NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))),
- NodeMetrics(b1.address,
- 4712,
- Set(Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))),
- Metric("bar1", Double.MinPositiveValue, None),
- Metric("bar2", Float.MaxValue, None),
- Metric("bar3", Int.MaxValue, None),
- Metric("bar4", Long.MaxValue, None),
- Metric("bar5", BigInt(Long.MaxValue), None)))))
+ Set(
+ NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))),
+ NodeMetrics(
+ b1.address,
+ 4712,
+ Set(
+ Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))),
+ Metric("bar1", Double.MinPositiveValue, None),
+ Metric("bar2", Float.MaxValue, None),
+ Metric("bar3", Int.MaxValue, None),
+ Metric("bar4", Long.MaxValue, None),
+ Metric("bar5", BigInt(Long.MaxValue), None)))))
checkSerialization(MetricsGossipEnvelope(a1.address, metricsGossip, true))
diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala
index 27b7f327e0..fa3cb46c4d 100644
--- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala
+++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala
@@ -33,59 +33,61 @@ object ClusterShardingSettings {
apply(system)
/** INTERNAL API: Indended only for internal use, it is not recommended to keep converting between the setting types */
- private[akka] def fromUntypedSettings(numberOfShards: Int,
- untypedSettings: UntypedShardingSettings): ClusterShardingSettings = {
- new ClusterShardingSettings(numberOfShards,
- role = untypedSettings.role,
- dataCenter = None,
- rememberEntities = untypedSettings.rememberEntities,
- journalPluginId = untypedSettings.journalPluginId,
- snapshotPluginId = untypedSettings.snapshotPluginId,
- passivateIdleEntityAfter = untypedSettings.passivateIdleEntityAfter,
- stateStoreMode = StateStoreMode.byName(untypedSettings.stateStoreMode),
- new TuningParameters(untypedSettings.tuningParameters),
- new ClusterSingletonManagerSettings(
- untypedSettings.coordinatorSingletonSettings.singletonName,
- untypedSettings.coordinatorSingletonSettings.role,
- untypedSettings.coordinatorSingletonSettings.removalMargin,
- untypedSettings.coordinatorSingletonSettings.handOverRetryInterval))
+ private[akka] def fromUntypedSettings(
+ numberOfShards: Int,
+ untypedSettings: UntypedShardingSettings): ClusterShardingSettings = {
+ new ClusterShardingSettings(
+ numberOfShards,
+ role = untypedSettings.role,
+ dataCenter = None,
+ rememberEntities = untypedSettings.rememberEntities,
+ journalPluginId = untypedSettings.journalPluginId,
+ snapshotPluginId = untypedSettings.snapshotPluginId,
+ passivateIdleEntityAfter = untypedSettings.passivateIdleEntityAfter,
+ stateStoreMode = StateStoreMode.byName(untypedSettings.stateStoreMode),
+ new TuningParameters(untypedSettings.tuningParameters),
+ new ClusterSingletonManagerSettings(
+ untypedSettings.coordinatorSingletonSettings.singletonName,
+ untypedSettings.coordinatorSingletonSettings.role,
+ untypedSettings.coordinatorSingletonSettings.removalMargin,
+ untypedSettings.coordinatorSingletonSettings.handOverRetryInterval))
}
/** INTERNAL API: Indended only for internal use, it is not recommended to keep converting between the setting types */
private[akka] def toUntypedSettings(settings: ClusterShardingSettings): UntypedShardingSettings = {
- new UntypedShardingSettings(role = settings.role,
- rememberEntities = settings.rememberEntities,
- journalPluginId = settings.journalPluginId,
- snapshotPluginId = settings.snapshotPluginId,
- stateStoreMode = settings.stateStoreMode.name,
- passivateIdleEntityAfter = settings.passivateIdleEntityAfter,
- new UntypedShardingSettings.TuningParameters(
- bufferSize = settings.tuningParameters.bufferSize,
- coordinatorFailureBackoff = settings.tuningParameters.coordinatorFailureBackoff,
- retryInterval = settings.tuningParameters.retryInterval,
- handOffTimeout = settings.tuningParameters.handOffTimeout,
- shardStartTimeout = settings.tuningParameters.shardStartTimeout,
- shardFailureBackoff = settings.tuningParameters.shardFailureBackoff,
- entityRestartBackoff = settings.tuningParameters.entityRestartBackoff,
- rebalanceInterval = settings.tuningParameters.rebalanceInterval,
- snapshotAfter = settings.tuningParameters.snapshotAfter,
- keepNrOfBatches = settings.tuningParameters.keepNrOfBatches,
- leastShardAllocationRebalanceThreshold =
- settings.tuningParameters.leastShardAllocationRebalanceThreshold, // TODO extract it a bit
- leastShardAllocationMaxSimultaneousRebalance =
- settings.tuningParameters.leastShardAllocationMaxSimultaneousRebalance,
- waitingForStateTimeout = settings.tuningParameters.waitingForStateTimeout,
- updatingStateTimeout = settings.tuningParameters.updatingStateTimeout,
- entityRecoveryStrategy = settings.tuningParameters.entityRecoveryStrategy,
- entityRecoveryConstantRateStrategyFrequency =
- settings.tuningParameters.entityRecoveryConstantRateStrategyFrequency,
- entityRecoveryConstantRateStrategyNumberOfEntities =
- settings.tuningParameters.entityRecoveryConstantRateStrategyNumberOfEntities),
- new UntypedClusterSingletonManagerSettings(
- settings.coordinatorSingletonSettings.singletonName,
- settings.coordinatorSingletonSettings.role,
- settings.coordinatorSingletonSettings.removalMargin,
- settings.coordinatorSingletonSettings.handOverRetryInterval))
+ new UntypedShardingSettings(
+ role = settings.role,
+ rememberEntities = settings.rememberEntities,
+ journalPluginId = settings.journalPluginId,
+ snapshotPluginId = settings.snapshotPluginId,
+ stateStoreMode = settings.stateStoreMode.name,
+ passivateIdleEntityAfter = settings.passivateIdleEntityAfter,
+ new UntypedShardingSettings.TuningParameters(
+ bufferSize = settings.tuningParameters.bufferSize,
+ coordinatorFailureBackoff = settings.tuningParameters.coordinatorFailureBackoff,
+ retryInterval = settings.tuningParameters.retryInterval,
+ handOffTimeout = settings.tuningParameters.handOffTimeout,
+ shardStartTimeout = settings.tuningParameters.shardStartTimeout,
+ shardFailureBackoff = settings.tuningParameters.shardFailureBackoff,
+ entityRestartBackoff = settings.tuningParameters.entityRestartBackoff,
+ rebalanceInterval = settings.tuningParameters.rebalanceInterval,
+ snapshotAfter = settings.tuningParameters.snapshotAfter,
+ keepNrOfBatches = settings.tuningParameters.keepNrOfBatches,
+ leastShardAllocationRebalanceThreshold = settings.tuningParameters.leastShardAllocationRebalanceThreshold, // TODO extract it a bit
+ leastShardAllocationMaxSimultaneousRebalance =
+ settings.tuningParameters.leastShardAllocationMaxSimultaneousRebalance,
+ waitingForStateTimeout = settings.tuningParameters.waitingForStateTimeout,
+ updatingStateTimeout = settings.tuningParameters.updatingStateTimeout,
+ entityRecoveryStrategy = settings.tuningParameters.entityRecoveryStrategy,
+ entityRecoveryConstantRateStrategyFrequency =
+ settings.tuningParameters.entityRecoveryConstantRateStrategyFrequency,
+ entityRecoveryConstantRateStrategyNumberOfEntities =
+ settings.tuningParameters.entityRecoveryConstantRateStrategyNumberOfEntities),
+ new UntypedClusterSingletonManagerSettings(
+ settings.coordinatorSingletonSettings.singletonName,
+ settings.coordinatorSingletonSettings.role,
+ settings.coordinatorSingletonSettings.removalMargin,
+ settings.coordinatorSingletonSettings.handOverRetryInterval))
}
@@ -105,23 +107,24 @@ object ClusterShardingSettings {
final case object StateStoreModeDData extends StateStoreMode { override def name = "ddata" }
// generated using kaze-class
- final class TuningParameters private (val bufferSize: Int,
- val coordinatorFailureBackoff: FiniteDuration,
- val entityRecoveryConstantRateStrategyFrequency: FiniteDuration,
- val entityRecoveryConstantRateStrategyNumberOfEntities: Int,
- val entityRecoveryStrategy: String,
- val entityRestartBackoff: FiniteDuration,
- val handOffTimeout: FiniteDuration,
- val keepNrOfBatches: Int,
- val leastShardAllocationMaxSimultaneousRebalance: Int,
- val leastShardAllocationRebalanceThreshold: Int,
- val rebalanceInterval: FiniteDuration,
- val retryInterval: FiniteDuration,
- val shardFailureBackoff: FiniteDuration,
- val shardStartTimeout: FiniteDuration,
- val snapshotAfter: Int,
- val updatingStateTimeout: FiniteDuration,
- val waitingForStateTimeout: FiniteDuration) {
+ final class TuningParameters private (
+ val bufferSize: Int,
+ val coordinatorFailureBackoff: FiniteDuration,
+ val entityRecoveryConstantRateStrategyFrequency: FiniteDuration,
+ val entityRecoveryConstantRateStrategyNumberOfEntities: Int,
+ val entityRecoveryStrategy: String,
+ val entityRestartBackoff: FiniteDuration,
+ val handOffTimeout: FiniteDuration,
+ val keepNrOfBatches: Int,
+ val leastShardAllocationMaxSimultaneousRebalance: Int,
+ val leastShardAllocationRebalanceThreshold: Int,
+ val rebalanceInterval: FiniteDuration,
+ val retryInterval: FiniteDuration,
+ val shardFailureBackoff: FiniteDuration,
+ val shardStartTimeout: FiniteDuration,
+ val snapshotAfter: Int,
+ val updatingStateTimeout: FiniteDuration,
+ val waitingForStateTimeout: FiniteDuration) {
def this(untyped: UntypedShardingSettings.TuningParameters) {
this(
@@ -145,8 +148,9 @@ object ClusterShardingSettings {
}
- require(entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant",
- s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'")
+ require(
+ entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant",
+ s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'")
def withBufferSize(value: Int): TuningParameters = copy(bufferSize = value)
def withCoordinatorFailureBackoff(value: FiniteDuration): TuningParameters = copy(coordinatorFailureBackoff = value)
@@ -201,24 +205,24 @@ object ClusterShardingSettings {
snapshotAfter: Int = snapshotAfter,
updatingStateTimeout: FiniteDuration = updatingStateTimeout,
waitingForStateTimeout: FiniteDuration = waitingForStateTimeout): TuningParameters =
- new TuningParameters(bufferSize = bufferSize,
- coordinatorFailureBackoff = coordinatorFailureBackoff,
- entityRecoveryConstantRateStrategyFrequency = entityRecoveryConstantRateStrategyFrequency,
- entityRecoveryConstantRateStrategyNumberOfEntities =
- entityRecoveryConstantRateStrategyNumberOfEntities,
- entityRecoveryStrategy = entityRecoveryStrategy,
- entityRestartBackoff = entityRestartBackoff,
- handOffTimeout = handOffTimeout,
- keepNrOfBatches = keepNrOfBatches,
- leastShardAllocationMaxSimultaneousRebalance = leastShardAllocationMaxSimultaneousRebalance,
- leastShardAllocationRebalanceThreshold = leastShardAllocationRebalanceThreshold,
- rebalanceInterval = rebalanceInterval,
- retryInterval = retryInterval,
- shardFailureBackoff = shardFailureBackoff,
- shardStartTimeout = shardStartTimeout,
- snapshotAfter = snapshotAfter,
- updatingStateTimeout = updatingStateTimeout,
- waitingForStateTimeout = waitingForStateTimeout)
+ new TuningParameters(
+ bufferSize = bufferSize,
+ coordinatorFailureBackoff = coordinatorFailureBackoff,
+ entityRecoveryConstantRateStrategyFrequency = entityRecoveryConstantRateStrategyFrequency,
+ entityRecoveryConstantRateStrategyNumberOfEntities = entityRecoveryConstantRateStrategyNumberOfEntities,
+ entityRecoveryStrategy = entityRecoveryStrategy,
+ entityRestartBackoff = entityRestartBackoff,
+ handOffTimeout = handOffTimeout,
+ keepNrOfBatches = keepNrOfBatches,
+ leastShardAllocationMaxSimultaneousRebalance = leastShardAllocationMaxSimultaneousRebalance,
+ leastShardAllocationRebalanceThreshold = leastShardAllocationRebalanceThreshold,
+ rebalanceInterval = rebalanceInterval,
+ retryInterval = retryInterval,
+ shardFailureBackoff = shardFailureBackoff,
+ shardStartTimeout = shardStartTimeout,
+ snapshotAfter = snapshotAfter,
+ updatingStateTimeout = updatingStateTimeout,
+ waitingForStateTimeout = waitingForStateTimeout)
override def toString =
s"""TuningParameters($bufferSize,$coordinatorFailureBackoff,$entityRecoveryConstantRateStrategyFrequency,$entityRecoveryConstantRateStrategyNumberOfEntities,$entityRecoveryStrategy,$entityRestartBackoff,$handOffTimeout,$keepNrOfBatches,$leastShardAllocationMaxSimultaneousRebalance,$leastShardAllocationRebalanceThreshold,$rebalanceInterval,$retryInterval,$shardFailureBackoff,$shardStartTimeout,$snapshotAfter,$updatingStateTimeout,$waitingForStateTimeout)"""
@@ -250,23 +254,25 @@ object ClusterShardingSettings {
* actors.
* @param tuningParameters additional tuning parameters, see descriptions in reference.conf
*/
-final class ClusterShardingSettings(val numberOfShards: Int,
- val role: Option[String],
- val dataCenter: Option[DataCenter],
- val rememberEntities: Boolean,
- val journalPluginId: String,
- val snapshotPluginId: String,
- val passivateIdleEntityAfter: FiniteDuration,
- val stateStoreMode: ClusterShardingSettings.StateStoreMode,
- val tuningParameters: ClusterShardingSettings.TuningParameters,
- val coordinatorSingletonSettings: ClusterSingletonManagerSettings)
+final class ClusterShardingSettings(
+ val numberOfShards: Int,
+ val role: Option[String],
+ val dataCenter: Option[DataCenter],
+ val rememberEntities: Boolean,
+ val journalPluginId: String,
+ val snapshotPluginId: String,
+ val passivateIdleEntityAfter: FiniteDuration,
+ val stateStoreMode: ClusterShardingSettings.StateStoreMode,
+ val tuningParameters: ClusterShardingSettings.TuningParameters,
+ val coordinatorSingletonSettings: ClusterSingletonManagerSettings)
extends NoSerializationVerificationNeeded {
import akka.cluster.sharding.typed.ClusterShardingSettings.StateStoreModeDData
import akka.cluster.sharding.typed.ClusterShardingSettings.StateStoreModePersistence
- require(stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData,
- s"Unknown 'state-store-mode' [$stateStoreMode], " +
- s"valid values are '${StateStoreModeDData.name}' or '${StateStoreModePersistence.name}'")
+ require(
+ stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData,
+ s"Unknown 'state-store-mode' [$stateStoreMode], " +
+ s"valid values are '${StateStoreModeDData.name}' or '${StateStoreModePersistence.name}'")
/**
* INTERNAL API
@@ -315,23 +321,25 @@ final class ClusterShardingSettings(val numberOfShards: Int,
coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings =
copy(coordinatorSingletonSettings = coordinatorSingletonSettings)
- private def copy(role: Option[String] = role,
- dataCenter: Option[DataCenter] = dataCenter,
- rememberEntities: Boolean = rememberEntities,
- journalPluginId: String = journalPluginId,
- snapshotPluginId: String = snapshotPluginId,
- stateStoreMode: ClusterShardingSettings.StateStoreMode = stateStoreMode,
- tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters,
- coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings,
- passivateIdleEntityAfter: FiniteDuration = passivateIdleEntityAfter): ClusterShardingSettings =
- new ClusterShardingSettings(numberOfShards,
- role,
- dataCenter,
- rememberEntities,
- journalPluginId,
- snapshotPluginId,
- passivateIdleEntityAfter,
- stateStoreMode,
- tuningParameters,
- coordinatorSingletonSettings)
+ private def copy(
+ role: Option[String] = role,
+ dataCenter: Option[DataCenter] = dataCenter,
+ rememberEntities: Boolean = rememberEntities,
+ journalPluginId: String = journalPluginId,
+ snapshotPluginId: String = snapshotPluginId,
+ stateStoreMode: ClusterShardingSettings.StateStoreMode = stateStoreMode,
+ tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters,
+ coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings,
+ passivateIdleEntityAfter: FiniteDuration = passivateIdleEntityAfter): ClusterShardingSettings =
+ new ClusterShardingSettings(
+ numberOfShards,
+ role,
+ dataCenter,
+ rememberEntities,
+ journalPluginId,
+ snapshotPluginId,
+ passivateIdleEntityAfter,
+ stateStoreMode,
+ tuningParameters,
+ coordinatorSingletonSettings)
}
diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala
index 325d854d3b..b321c85698 100644
--- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala
+++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala
@@ -93,10 +93,10 @@ import akka.util.Timeout
/**
* INTERNAL API
*/
-@InternalApi private[akka] final case class EntityTypeKeyImpl[T](name: String,
- messageClassName: String,
- entityIdSeparator: String =
- EntityTypeKeyImpl.EntityIdSeparator)
+@InternalApi private[akka] final case class EntityTypeKeyImpl[T](
+ name: String,
+ messageClassName: String,
+ entityIdSeparator: String = EntityTypeKeyImpl.EntityIdSeparator)
extends javadsl.EntityTypeKey[T]
with scaladsl.EntityTypeKey[T] {
@@ -127,8 +127,9 @@ import akka.util.Timeout
import akka.actor.typed.scaladsl.adapter._
- require(system.isInstanceOf[ActorSystemAdapter[_]],
- "only adapted untyped actor systems can be used for cluster features")
+ require(
+ system.isInstanceOf[ActorSystemAdapter[_]],
+ "only adapted untyped actor systems can be used for cluster features")
private val cluster = Cluster(system)
private val untypedSystem: ExtendedActorSystem = system.toUntyped.asInstanceOf[ExtendedActorSystem]
@@ -153,39 +154,41 @@ import akka.util.Timeout
case Some(e) => e
}).asInstanceOf[ShardingMessageExtractor[E, M]]
- internalInit(entity.createBehavior,
- entity.entityProps,
- entity.typeKey,
- entity.stopMessage,
- settings,
- extractor,
- entity.allocationStrategy)
+ internalInit(
+ entity.createBehavior,
+ entity.entityProps,
+ entity.typeKey,
+ entity.stopMessage,
+ settings,
+ extractor,
+ entity.allocationStrategy)
}
// javadsl impl
override def init[M, E](entity: javadsl.Entity[M, E]): ActorRef[E] = {
import scala.compat.java8.OptionConverters._
init(
- new scaladsl.Entity(createBehavior = (ctx: EntityContext) =>
- Behaviors.setup[M] { actorContext =>
- entity.createBehavior(
- new javadsl.EntityContext[M](ctx.entityId, ctx.shard, actorContext.asJava))
- },
- typeKey = entity.typeKey.asScala,
- stopMessage = entity.stopMessage.asScala,
- entityProps = entity.entityProps,
- settings = entity.settings.asScala,
- messageExtractor = entity.messageExtractor.asScala,
- allocationStrategy = entity.allocationStrategy.asScala))
+ new scaladsl.Entity(
+ createBehavior = (ctx: EntityContext) =>
+ Behaviors.setup[M] { actorContext =>
+ entity.createBehavior(new javadsl.EntityContext[M](ctx.entityId, ctx.shard, actorContext.asJava))
+ },
+ typeKey = entity.typeKey.asScala,
+ stopMessage = entity.stopMessage.asScala,
+ entityProps = entity.entityProps,
+ settings = entity.settings.asScala,
+ messageExtractor = entity.messageExtractor.asScala,
+ allocationStrategy = entity.allocationStrategy.asScala))
}
- private def internalInit[M, E](behavior: EntityContext => Behavior[M],
- entityProps: Props,
- typeKey: scaladsl.EntityTypeKey[M],
- stopMessage: Option[M],
- settings: ClusterShardingSettings,
- extractor: ShardingMessageExtractor[E, M],
- allocationStrategy: Option[ShardAllocationStrategy]): ActorRef[E] = {
+ private def internalInit[M, E](
+ behavior: EntityContext => Behavior[M],
+ entityProps: Props,
+ typeKey: scaladsl.EntityTypeKey[M],
+ stopMessage: Option[M],
+ settings: ClusterShardingSettings,
+ extractor: ShardingMessageExtractor[E, M],
+ allocationStrategy: Option[ShardAllocationStrategy]): ActorRef[E] = {
val extractorAdapter = new ExtractorAdapter(extractor)
val extractEntityId: ShardRegion.ExtractEntityId = {
@@ -205,22 +208,18 @@ import akka.util.Timeout
log.info("Starting Shard Region [{}]...", typeKey.name)
val shardCommandDelegator: ActorRef[scaladsl.ClusterSharding.ShardCommand] =
- shardCommandActors.computeIfAbsent(typeKey.name,
- new java.util.function.Function[
- String,
- ActorRef[scaladsl.ClusterSharding.ShardCommand]] {
- override def apply(
- t: String): ActorRef[scaladsl.ClusterSharding.ShardCommand] = {
- // using untyped.systemActorOf to avoid the Future[ActorRef]
- system.toUntyped
- .asInstanceOf[ExtendedActorSystem]
- .systemActorOf(
- PropsAdapter(
- ShardCommandActor.behavior(stopMessage.getOrElse(PoisonPill))),
- URLEncoder
- .encode(typeKey.name, ByteString.UTF_8) + "ShardCommandDelegator")
- }
- })
+ shardCommandActors.computeIfAbsent(
+ typeKey.name,
+ new java.util.function.Function[String, ActorRef[scaladsl.ClusterSharding.ShardCommand]] {
+ override def apply(t: String): ActorRef[scaladsl.ClusterSharding.ShardCommand] = {
+ // using untyped.systemActorOf to avoid the Future[ActorRef]
+ system.toUntyped
+ .asInstanceOf[ExtendedActorSystem]
+ .systemActorOf(
+ PropsAdapter(ShardCommandActor.behavior(stopMessage.getOrElse(PoisonPill))),
+ URLEncoder.encode(typeKey.name, ByteString.UTF_8) + "ShardCommandDelegator")
+ }
+ })
def poisonPillInterceptor(behv: Behavior[M]): Behavior[M] = {
stopMessage match {
@@ -233,25 +232,28 @@ import akka.util.Timeout
val behv = behavior(new EntityContext(entityId, shardCommandDelegator))
PropsAdapter(poisonPillInterceptor(behv), entityProps)
}
- untypedSharding.internalStart(typeKey.name,
- untypedEntityPropsFactory,
- ClusterShardingSettings.toUntypedSettings(settings),
- extractEntityId,
- extractShardId,
- allocationStrategy.getOrElse(defaultShardAllocationStrategy(settings)),
- stopMessage.getOrElse(PoisonPill))
+ untypedSharding.internalStart(
+ typeKey.name,
+ untypedEntityPropsFactory,
+ ClusterShardingSettings.toUntypedSettings(settings),
+ extractEntityId,
+ extractShardId,
+ allocationStrategy.getOrElse(defaultShardAllocationStrategy(settings)),
+ stopMessage.getOrElse(PoisonPill))
} else {
- log.info("Starting Shard Region Proxy [{}] (no actors will be hosted on this node) " +
- "for role [{}] and dataCenter [{}] ...",
- typeKey.name,
- settings.role,
- settings.dataCenter)
+ log.info(
+ "Starting Shard Region Proxy [{}] (no actors will be hosted on this node) " +
+ "for role [{}] and dataCenter [{}] ...",
+ typeKey.name,
+ settings.role,
+ settings.dataCenter)
- untypedSharding.startProxy(typeKey.name,
- settings.role,
- dataCenter = settings.dataCenter,
- extractEntityId,
- extractShardId)
+ untypedSharding.startProxy(
+ typeKey.name,
+ settings.role,
+ dataCenter = settings.dataCenter,
+ extractEntityId,
+ extractShardId)
}
val messageClassName = typeKey.asInstanceOf[EntityTypeKeyImpl[M]].messageClassName
@@ -268,17 +270,19 @@ import akka.util.Timeout
}
override def entityRefFor[M](typeKey: scaladsl.EntityTypeKey[M], entityId: String): scaladsl.EntityRef[M] = {
- new EntityRefImpl[M](untypedSharding.shardRegion(typeKey.name),
- entityId,
- typeKey.asInstanceOf[EntityTypeKeyImpl[M]],
- system.scheduler)
+ new EntityRefImpl[M](
+ untypedSharding.shardRegion(typeKey.name),
+ entityId,
+ typeKey.asInstanceOf[EntityTypeKeyImpl[M]],
+ system.scheduler)
}
override def entityRefFor[M](typeKey: javadsl.EntityTypeKey[M], entityId: String): javadsl.EntityRef[M] = {
- new EntityRefImpl[M](untypedSharding.shardRegion(typeKey.name),
- entityId,
- typeKey.asInstanceOf[EntityTypeKeyImpl[M]],
- system.scheduler)
+ new EntityRefImpl[M](
+ untypedSharding.shardRegion(typeKey.name),
+ entityId,
+ typeKey.asInstanceOf[EntityTypeKeyImpl[M]],
+ system.scheduler)
}
override def defaultShardAllocationStrategy(settings: ClusterShardingSettings): ShardAllocationStrategy = {
@@ -298,10 +302,11 @@ import akka.util.Timeout
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class EntityRefImpl[M](shardRegion: akka.actor.ActorRef,
- entityId: String,
- typeKey: EntityTypeKeyImpl[M],
- scheduler: Scheduler)
+@InternalApi private[akka] final class EntityRefImpl[M](
+ shardRegion: akka.actor.ActorRef,
+ entityId: String,
+ typeKey: EntityTypeKeyImpl[M],
+ scheduler: Scheduler)
extends javadsl.EntityRef[M]
with scaladsl.EntityRef[M]
with InternalRecipientRef[M] {
@@ -328,16 +333,18 @@ import akka.util.Timeout
// Note: _promiseRef mustn't have a type pattern, since it can be null
private[this] val (_ref: ActorRef[U], _future: Future[U], _promiseRef) =
if (untyped.isTerminated)
- (adapt.ActorRefAdapter[U](untyped.provider.deadLetters),
- Future.failed[U](
- new AskTimeoutException(s"Recipient shard region of [${EntityRefImpl.this}] had already been terminated.")),
- null)
+ (
+ adapt.ActorRefAdapter[U](untyped.provider.deadLetters),
+ Future.failed[U](
+ new AskTimeoutException(s"Recipient shard region of [${EntityRefImpl.this}] had already been terminated.")),
+ null)
else if (timeout.duration.length <= 0)
- (adapt.ActorRefAdapter[U](untyped.provider.deadLetters),
- Future.failed[U](
- new IllegalArgumentException(
- s"Timeout length must be positive, question not sent to [${EntityRefImpl.this}]")),
- null)
+ (
+ adapt.ActorRefAdapter[U](untyped.provider.deadLetters),
+ Future.failed[U](
+ new IllegalArgumentException(
+ s"Timeout length must be positive, question not sent to [${EntityRefImpl.this}]")),
+ null)
else {
// note that the real messageClassName will be set afterwards, replyTo pattern
val a =
diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala
index f7ffa0c651..f7bdc2dbe6 100644
--- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala
+++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala
@@ -213,15 +213,17 @@ object Entity {
* @param createBehavior Create the behavior for an entity given a [[EntityContext]] (includes entityId)
* @tparam M The type of message the entity accepts
*/
- def of[M](typeKey: EntityTypeKey[M],
- createBehavior: JFunction[EntityContext[M], Behavior[M]]): Entity[M, ShardingEnvelope[M]] = {
- new Entity(createBehavior,
- typeKey,
- Optional.empty(),
- Props.empty,
- Optional.empty(),
- Optional.empty(),
- Optional.empty())
+ def of[M](
+ typeKey: EntityTypeKey[M],
+ createBehavior: JFunction[EntityContext[M], Behavior[M]]): Entity[M, ShardingEnvelope[M]] = {
+ new Entity(
+ createBehavior,
+ typeKey,
+ Optional.empty(),
+ Props.empty,
+ Optional.empty(),
+ Optional.empty(),
+ Optional.empty())
}
/**
@@ -240,17 +242,18 @@ object Entity {
createPersistentEntity: JFunction[EntityContext[Command], EventSourcedEntity[Command, Event, State]])
: Entity[Command, ShardingEnvelope[Command]] = {
- of(typeKey,
- new JFunction[EntityContext[Command], Behavior[Command]] {
- override def apply(ctx: EntityContext[Command]): Behavior[Command] = {
- val persistentEntity = createPersistentEntity(ctx)
- if (persistentEntity.entityTypeKey != typeKey)
- throw new IllegalArgumentException(
- s"The [${persistentEntity.entityTypeKey}] of the PersistentEntity " +
- s" [${persistentEntity.getClass.getName}] doesn't match expected $typeKey.")
- persistentEntity
- }
- })
+ of(
+ typeKey,
+ new JFunction[EntityContext[Command], Behavior[Command]] {
+ override def apply(ctx: EntityContext[Command]): Behavior[Command] = {
+ val persistentEntity = createPersistentEntity(ctx)
+ if (persistentEntity.entityTypeKey != typeKey)
+ throw new IllegalArgumentException(
+ s"The [${persistentEntity.entityTypeKey}] of the PersistentEntity " +
+ s" [${persistentEntity.getClass.getName}] doesn't match expected $typeKey.")
+ persistentEntity
+ }
+ })
}
}
@@ -258,13 +261,14 @@ object Entity {
/**
* Defines how the entity should be created. Used in [[ClusterSharding#init]].
*/
-final class Entity[M, E] private (val createBehavior: JFunction[EntityContext[M], Behavior[M]],
- val typeKey: EntityTypeKey[M],
- val stopMessage: Optional[M],
- val entityProps: Props,
- val settings: Optional[ClusterShardingSettings],
- val messageExtractor: Optional[ShardingMessageExtractor[E, M]],
- val allocationStrategy: Optional[ShardAllocationStrategy]) {
+final class Entity[M, E] private (
+ val createBehavior: JFunction[EntityContext[M], Behavior[M]],
+ val typeKey: EntityTypeKey[M],
+ val stopMessage: Optional[M],
+ val entityProps: Props,
+ val settings: Optional[ClusterShardingSettings],
+ val messageExtractor: Optional[ShardingMessageExtractor[E, M]],
+ val allocationStrategy: Optional[ShardAllocationStrategy]) {
/**
* [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings.
@@ -296,13 +300,14 @@ final class Entity[M, E] private (val createBehavior: JFunction[EntityContext[M]
* is configured with `akka.cluster.sharding.number-of-shards`.
*/
def withMessageExtractor[Envelope](newExtractor: ShardingMessageExtractor[Envelope, M]): Entity[M, Envelope] =
- new Entity(createBehavior,
- typeKey,
- stopMessage,
- entityProps,
- settings,
- Optional.ofNullable(newExtractor),
- allocationStrategy)
+ new Entity(
+ createBehavior,
+ typeKey,
+ stopMessage,
+ entityProps,
+ settings,
+ Optional.ofNullable(newExtractor),
+ allocationStrategy)
/**
* Allocation strategy which decides on which nodes to allocate new shards,
@@ -311,12 +316,13 @@ final class Entity[M, E] private (val createBehavior: JFunction[EntityContext[M]
def withAllocationStrategy(newAllocationStrategy: ShardAllocationStrategy): Entity[M, E] =
copy(allocationStrategy = Optional.ofNullable(newAllocationStrategy))
- private def copy(createBehavior: JFunction[EntityContext[M], Behavior[M]] = createBehavior,
- typeKey: EntityTypeKey[M] = typeKey,
- stopMessage: Optional[M] = stopMessage,
- entityProps: Props = entityProps,
- settings: Optional[ClusterShardingSettings] = settings,
- allocationStrategy: Optional[ShardAllocationStrategy] = allocationStrategy): Entity[M, E] = {
+ private def copy(
+ createBehavior: JFunction[EntityContext[M], Behavior[M]] = createBehavior,
+ typeKey: EntityTypeKey[M] = typeKey,
+ stopMessage: Optional[M] = stopMessage,
+ entityProps: Props = entityProps,
+ settings: Optional[ClusterShardingSettings] = settings,
+ allocationStrategy: Optional[ShardAllocationStrategy] = allocationStrategy): Entity[M, E] = {
new Entity(createBehavior, typeKey, stopMessage, entityProps, settings, messageExtractor, allocationStrategy)
}
@@ -325,9 +331,10 @@ final class Entity[M, E] private (val createBehavior: JFunction[EntityContext[M]
/**
* Parameter to [[Entity.of]]
*/
-final class EntityContext[M](entityId: String,
- shard: ActorRef[ClusterSharding.ShardCommand],
- actorContext: ActorContext[M]) {
+final class EntityContext[M](
+ entityId: String,
+ shard: ActorRef[ClusterSharding.ShardCommand],
+ actorContext: ActorContext[M]) {
def getEntityId: String = entityId
diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala
index 9f7a6deac1..5873601290 100644
--- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala
+++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala
@@ -27,17 +27,19 @@ abstract class EventSourcedEntity[Command, Event, State >: Null] private (
extends EventSourcedBehavior[Command, Event, State](persistenceId, onPersistFailure) {
def this(entityTypeKey: EntityTypeKey[Command], entityId: String) = {
- this(entityTypeKey,
- entityId,
- persistenceId = entityTypeKey.persistenceIdFrom(entityId),
- Optional.empty[BackoffSupervisorStrategy])
+ this(
+ entityTypeKey,
+ entityId,
+ persistenceId = entityTypeKey.persistenceIdFrom(entityId),
+ Optional.empty[BackoffSupervisorStrategy])
}
def this(entityTypeKey: EntityTypeKey[Command], entityId: String, onPersistFailure: BackoffSupervisorStrategy) = {
- this(entityTypeKey,
- entityId,
- persistenceId = entityTypeKey.persistenceIdFrom(entityId),
- Optional.ofNullable(onPersistFailure))
+ this(
+ entityTypeKey,
+ entityId,
+ persistenceId = entityTypeKey.persistenceIdFrom(entityId),
+ Optional.ofNullable(onPersistFailure))
}
}
diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala
index 28eaf80d63..e2f964af21 100644
--- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala
+++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala
@@ -220,21 +220,23 @@ object Entity {
* @param createBehavior Create the behavior for an entity given a [[EntityContext]] (includes entityId)
* @tparam M The type of message the entity accepts
*/
- def apply[M](typeKey: EntityTypeKey[M],
- createBehavior: EntityContext => Behavior[M]): Entity[M, ShardingEnvelope[M]] =
+ def apply[M](
+ typeKey: EntityTypeKey[M],
+ createBehavior: EntityContext => Behavior[M]): Entity[M, ShardingEnvelope[M]] =
new Entity(createBehavior, typeKey, None, Props.empty, None, None, None)
}
/**
* Defines how the entity should be created. Used in [[ClusterSharding#init]].
*/
-final class Entity[M, E] private[akka] (val createBehavior: EntityContext => Behavior[M],
- val typeKey: EntityTypeKey[M],
- val stopMessage: Option[M],
- val entityProps: Props,
- val settings: Option[ClusterShardingSettings],
- val messageExtractor: Option[ShardingMessageExtractor[E, M]],
- val allocationStrategy: Option[ShardAllocationStrategy]) {
+final class Entity[M, E] private[akka] (
+ val createBehavior: EntityContext => Behavior[M],
+ val typeKey: EntityTypeKey[M],
+ val stopMessage: Option[M],
+ val entityProps: Props,
+ val settings: Option[ClusterShardingSettings],
+ val messageExtractor: Option[ShardingMessageExtractor[E, M]],
+ val allocationStrategy: Option[ShardAllocationStrategy]) {
/**
* [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings.
@@ -275,12 +277,13 @@ final class Entity[M, E] private[akka] (val createBehavior: EntityContext => Beh
def withAllocationStrategy(newAllocationStrategy: ShardAllocationStrategy): Entity[M, E] =
copy(allocationStrategy = Option(newAllocationStrategy))
- private def copy(createBehavior: EntityContext => Behavior[M] = createBehavior,
- typeKey: EntityTypeKey[M] = typeKey,
- stopMessage: Option[M] = stopMessage,
- entityProps: Props = entityProps,
- settings: Option[ClusterShardingSettings] = settings,
- allocationStrategy: Option[ShardAllocationStrategy] = allocationStrategy): Entity[M, E] = {
+ private def copy(
+ createBehavior: EntityContext => Behavior[M] = createBehavior,
+ typeKey: EntityTypeKey[M] = typeKey,
+ stopMessage: Option[M] = stopMessage,
+ entityProps: Props = entityProps,
+ settings: Option[ClusterShardingSettings] = settings,
+ allocationStrategy: Option[ShardAllocationStrategy] = allocationStrategy): Entity[M, E] = {
new Entity(createBehavior, typeKey, stopMessage, entityProps, settings, messageExtractor, allocationStrategy)
}
diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala
index 012c342db7..2e77d2d5bc 100644
--- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala
+++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala
@@ -19,10 +19,11 @@ object EventSourcedEntity {
* automatically from the [[EntityTypeKey]] and `entityId` constructor parameters by using
* [[EntityTypeKey.persistenceIdFrom]].
*/
- def apply[Command, Event, State](entityTypeKey: EntityTypeKey[Command],
- entityId: String,
- emptyState: State,
- commandHandler: (State, Command) => Effect[Event, State],
- eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] =
+ def apply[Command, Event, State](
+ entityTypeKey: EntityTypeKey[Command],
+ entityId: String,
+ emptyState: State,
+ commandHandler: (State, Command) => Effect[Event, State],
+ eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] =
EventSourcedBehavior(entityTypeKey.persistenceIdFrom(entityId), emptyState, commandHandler, eventHandler)
}
diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala
index ecfaf788f6..9e5f06a62a 100644
--- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala
+++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala
@@ -80,53 +80,53 @@ object ClusterShardingPersistenceSpec {
// transient state (testing purpose)
var stashing = false
- EventSourcedEntity[Command, String, String](entityTypeKey = typeKey,
- entityId = entityId,
- emptyState = "",
- commandHandler = (state, cmd) =>
- cmd match {
- case Add(s) =>
- if (stashing)
- Effect.stash()
- else
- Effect.persist(s)
+ EventSourcedEntity[Command, String, String](
+ entityTypeKey = typeKey,
+ entityId = entityId,
+ emptyState = "",
+ commandHandler = (state, cmd) =>
+ cmd match {
+ case Add(s) =>
+ if (stashing)
+ Effect.stash()
+ else
+ Effect.persist(s)
- case cmd @ AddWithConfirmation(s) =>
- if (stashing)
- Effect.stash()
- else
- Effect.persist(s).thenReply(cmd)(_ => Done)
+ case cmd @ AddWithConfirmation(s) =>
+ if (stashing)
+ Effect.stash()
+ else
+ Effect.persist(s).thenReply(cmd)(_ => Done)
- case Get(replyTo) =>
- replyTo ! s"$entityId:$state"
- Effect.none
+ case Get(replyTo) =>
+ replyTo ! s"$entityId:$state"
+ Effect.none
- case cmd @ PassivateAndPersist(s) =>
- shard ! Passivate(ctx.self)
- Effect.persist(s).thenReply(cmd)(_ => Done)
+ case cmd @ PassivateAndPersist(s) =>
+ shard ! Passivate(ctx.self)
+ Effect.persist(s).thenReply(cmd)(_ => Done)
- case Echo(msg, replyTo) =>
- Effect.none.thenRun(_ => replyTo ! msg)
+ case Echo(msg, replyTo) =>
+ Effect.none.thenRun(_ => replyTo ! msg)
- case Block(latch) =>
- latch.await(5, TimeUnit.SECONDS)
- Effect.none
+ case Block(latch) =>
+ latch.await(5, TimeUnit.SECONDS)
+ Effect.none
- case BeginStashingAddCommands =>
- stashing = true
- Effect.none
+ case BeginStashingAddCommands =>
+ stashing = true
+ Effect.none
- case UnstashAll =>
- stashing = false
- Effect.unstashAll()
+ case UnstashAll =>
+ stashing = false
+ Effect.unstashAll()
- case UnstashAllAndPassivate ⇒
- stashing = false
- shard ! Passivate(ctx.self)
- Effect.unstashAll()
- },
- eventHandler = (state, evt) ⇒
- if (state.isEmpty) evt else state + "|" + evt).receiveSignal {
+ case UnstashAllAndPassivate ⇒
+ stashing = false
+ shard ! Passivate(ctx.self)
+ Effect.unstashAll()
+ },
+ eventHandler = (state, evt) ⇒ if (state.isEmpty) evt else state + "|" + evt).receiveSignal {
case RecoveryCompleted(state) ⇒
ctx.log.debug("onRecoveryCompleted: [{}]", state)
lifecycleProbes.get(entityId) match {
diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala
index cf3b1a66ca..79da1c6e48 100644
--- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala
+++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala
@@ -24,8 +24,9 @@ object HelloWorldPersistentEntityExample {
private val sharding = ClusterSharding(system)
sharding.init(
- Entity(typeKey = HelloWorld.entityTypeKey,
- createBehavior = entityContext => HelloWorld.persistentEntity(entityContext.entityId)))
+ Entity(
+ typeKey = HelloWorld.entityTypeKey,
+ createBehavior = entityContext => HelloWorld.persistentEntity(entityContext.entityId)))
private implicit val askTimeout: Timeout = Timeout(5.seconds)
@@ -79,11 +80,12 @@ object HelloWorldPersistentEntityExample {
EntityTypeKey[Command]("HelloWorld")
def persistentEntity(entityId: String): Behavior[Command] =
- EventSourcedEntity(entityTypeKey = entityTypeKey,
- entityId = entityId,
- emptyState = KnownPeople(Set.empty),
- commandHandler,
- eventHandler)
+ EventSourcedEntity(
+ entityTypeKey = entityTypeKey,
+ entityId = entityId,
+ emptyState = KnownPeople(Set.empty),
+ commandHandler,
+ eventHandler)
}
//#persistent-entity
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala
index 02e9ac25b0..03804af920 100755
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala
@@ -203,21 +203,23 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* for a rebalance or graceful shutdown of a `ShardRegion`, e.g. `PoisonPill`.
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def start(typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- allocationStrategy: ShardAllocationStrategy,
- handOffStopMessage: Any): ActorRef = {
+ def start(
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ allocationStrategy: ShardAllocationStrategy,
+ handOffStopMessage: Any): ActorRef = {
- internalStart(typeName,
- _ => entityProps,
- settings,
- extractEntityId,
- extractShardId,
- allocationStrategy,
- handOffStopMessage)
+ internalStart(
+ typeName,
+ _ => entityProps,
+ settings,
+ extractEntityId,
+ extractShardId,
+ allocationStrategy,
+ handOffStopMessage)
}
/**
@@ -244,45 +246,49 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* for a rebalance or graceful shutdown of a `ShardRegion`, e.g. `PoisonPill`.
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def start(typeName: String,
- entityProps: Props,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- allocationStrategy: ShardAllocationStrategy,
- handOffStopMessage: Any): ActorRef = {
+ def start(
+ typeName: String,
+ entityProps: Props,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ allocationStrategy: ShardAllocationStrategy,
+ handOffStopMessage: Any): ActorRef = {
- start(typeName,
- entityProps,
- ClusterShardingSettings(system),
- extractEntityId,
- extractShardId,
- allocationStrategy,
- handOffStopMessage)
+ start(
+ typeName,
+ entityProps,
+ ClusterShardingSettings(system),
+ extractEntityId,
+ extractShardId,
+ allocationStrategy,
+ handOffStopMessage)
}
/**
* INTERNAL API
*/
- @InternalApi private[akka] def internalStart(typeName: String,
- entityProps: String => Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- allocationStrategy: ShardAllocationStrategy,
- handOffStopMessage: Any): ActorRef = {
+ @InternalApi private[akka] def internalStart(
+ typeName: String,
+ entityProps: String => Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ allocationStrategy: ShardAllocationStrategy,
+ handOffStopMessage: Any): ActorRef = {
if (settings.shouldHostShard(cluster)) {
regions.get(typeName) match {
case null =>
// it's ok to Start several time, the guardian will deduplicate concurrent requests
implicit val timeout = system.settings.CreationTimeout
- val startMsg = Start(typeName,
- entityProps,
- settings,
- extractEntityId,
- extractShardId,
- allocationStrategy,
- handOffStopMessage)
+ val startMsg = Start(
+ typeName,
+ entityProps,
+ settings,
+ extractEntityId,
+ extractShardId,
+ allocationStrategy,
+ handOffStopMessage)
val Started(shardRegion) = Await.result(guardian ? startMsg, timeout.duration)
regions.put(typeName, shardRegion)
shardRegion
@@ -291,11 +297,12 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
} else {
log.debug("Starting Shard Region Proxy [{}] (no actors will be hosted on this node)...", typeName)
- startProxy(typeName,
- settings.role,
- dataCenter = None, // startProxy method must be used directly to start a proxy for another DC
- extractEntityId,
- extractShardId)
+ startProxy(
+ typeName,
+ settings.role,
+ dataCenter = None, // startProxy method must be used directly to start a proxy for another DC
+ extractEntityId,
+ extractShardId)
}
}
@@ -323,11 +330,12 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* that passed the `extractEntityId` will be used
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def start(typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId): ActorRef = {
+ def start(
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId): ActorRef = {
val allocationStrategy = defaultShardAllocationStrategy(settings)
@@ -357,10 +365,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* that passed the `extractEntityId` will be used
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def start(typeName: String,
- entityProps: Props,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId): ActorRef = {
+ def start(
+ typeName: String,
+ entityProps: Props,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId): ActorRef = {
start(typeName, entityProps, ClusterShardingSettings(system), extractEntityId, extractShardId)
}
@@ -387,23 +396,25 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* for a rebalance or graceful shutdown of a `ShardRegion`, e.g. `PoisonPill`.
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def start(typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
- messageExtractor: ShardRegion.MessageExtractor,
- allocationStrategy: ShardAllocationStrategy,
- handOffStopMessage: Any): ActorRef = {
+ def start(
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ messageExtractor: ShardRegion.MessageExtractor,
+ allocationStrategy: ShardAllocationStrategy,
+ handOffStopMessage: Any): ActorRef = {
- internalStart(typeName,
- _ => entityProps,
- settings,
- extractEntityId = {
- case msg if messageExtractor.entityId(msg) ne null =>
- (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
- },
- extractShardId = msg => messageExtractor.shardId(msg),
- allocationStrategy = allocationStrategy,
- handOffStopMessage = handOffStopMessage)
+ internalStart(
+ typeName,
+ _ => entityProps,
+ settings,
+ extractEntityId = {
+ case msg if messageExtractor.entityId(msg) ne null =>
+ (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
+ },
+ extractShardId = msg => messageExtractor.shardId(msg),
+ allocationStrategy = allocationStrategy,
+ handOffStopMessage = handOffStopMessage)
}
/**
@@ -427,10 +438,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* entity from the incoming message
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def start(typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
- messageExtractor: ShardRegion.MessageExtractor): ActorRef = {
+ def start(
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ messageExtractor: ShardRegion.MessageExtractor): ActorRef = {
val allocationStrategy = defaultShardAllocationStrategy(settings)
@@ -480,10 +492,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* that passed the `extractEntityId` will be used
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def startProxy(typeName: String,
- role: Option[String],
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId): ActorRef =
+ def startProxy(
+ typeName: String,
+ role: Option[String],
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId): ActorRef =
startProxy(typeName, role, dataCenter = None, extractEntityId, extractShardId)
/**
@@ -507,11 +520,12 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* that passed the `extractEntityId` will be used
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def startProxy(typeName: String,
- role: Option[String],
- dataCenter: Option[DataCenter],
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId): ActorRef = {
+ def startProxy(
+ typeName: String,
+ role: Option[String],
+ dataCenter: Option[DataCenter],
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId): ActorRef = {
proxies.get(proxyName(typeName, dataCenter)) match {
case null =>
@@ -571,10 +585,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* entity from the incoming message
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
- def startProxy(typeName: String,
- role: Optional[String],
- dataCenter: Optional[String],
- messageExtractor: ShardRegion.MessageExtractor): ActorRef = {
+ def startProxy(
+ typeName: String,
+ role: Optional[String],
+ dataCenter: Optional[String],
+ messageExtractor: ShardRegion.MessageExtractor): ActorRef = {
startProxy(typeName, Option(role.orElse(null)), Option(dataCenter.orElse(null)), extractEntityId = {
case msg if messageExtractor.entityId(msg) ne null =>
@@ -641,19 +656,21 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
*/
private[akka] object ClusterShardingGuardian {
import ShardCoordinator.ShardAllocationStrategy
- final case class Start(typeName: String,
- entityProps: String => Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- allocationStrategy: ShardAllocationStrategy,
- handOffStopMessage: Any)
+ final case class Start(
+ typeName: String,
+ entityProps: String => Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ allocationStrategy: ShardAllocationStrategy,
+ handOffStopMessage: Any)
extends NoSerializationVerificationNeeded
- final case class StartProxy(typeName: String,
- dataCenter: Option[DataCenter],
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId)
+ final case class StartProxy(
+ typeName: String,
+ dataCenter: Option[DataCenter],
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId)
extends NoSerializationVerificationNeeded
final case class Started(shardRegion: ActorRef) extends NoSerializationVerificationNeeded
}
@@ -700,13 +717,14 @@ private[akka] class ClusterShardingGuardian extends Actor {
}
def receive: Receive = {
- case Start(typeName,
- entityProps,
- settings,
- extractEntityId,
- extractShardId,
- allocationStrategy,
- handOffStopMessage) =>
+ case Start(
+ typeName,
+ entityProps,
+ settings,
+ extractEntityId,
+ extractShardId,
+ allocationStrategy,
+ handOffStopMessage) =>
try {
import settings.role
import settings.tuningParameters.coordinatorFailureBackoff
@@ -723,31 +741,34 @@ private[akka] class ClusterShardingGuardian extends Actor {
else
ShardCoordinator.props(typeName, settings, allocationStrategy, rep, majorityMinCap)
val singletonProps = BackoffSupervisor
- .props(childProps = coordinatorProps,
- childName = "coordinator",
- minBackoff = coordinatorFailureBackoff,
- maxBackoff = coordinatorFailureBackoff * 5,
- randomFactor = 0.2,
- maxNrOfRetries = -1)
+ .props(
+ childProps = coordinatorProps,
+ childName = "coordinator",
+ minBackoff = coordinatorFailureBackoff,
+ maxBackoff = coordinatorFailureBackoff * 5,
+ randomFactor = 0.2,
+ maxNrOfRetries = -1)
.withDeploy(Deploy.local)
val singletonSettings = settings.coordinatorSingletonSettings.withSingletonName("singleton").withRole(role)
- context.actorOf(ClusterSingletonManager
- .props(singletonProps, terminationMessage = PoisonPill, singletonSettings)
- .withDispatcher(context.props.dispatcher),
- name = cName)
+ context.actorOf(
+ ClusterSingletonManager
+ .props(singletonProps, terminationMessage = PoisonPill, singletonSettings)
+ .withDispatcher(context.props.dispatcher),
+ name = cName)
}
context.actorOf(
ShardRegion
- .props(typeName = typeName,
- entityProps = entityProps,
- settings = settings,
- coordinatorPath = cPath,
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- handOffStopMessage = handOffStopMessage,
- replicator = rep,
- majorityMinCap)
+ .props(
+ typeName = typeName,
+ entityProps = entityProps,
+ settings = settings,
+ coordinatorPath = cPath,
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ handOffStopMessage = handOffStopMessage,
+ replicator = rep,
+ majorityMinCap)
.withDispatcher(context.props.dispatcher),
name = encName)
}
@@ -772,14 +793,15 @@ private[akka] class ClusterShardingGuardian extends Actor {
val shardRegion = context.child(actorName).getOrElse {
context.actorOf(
ShardRegion
- .proxyProps(typeName = typeName,
- dataCenter = dataCenter,
- settings = settings,
- coordinatorPath = cPath,
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- replicator = context.system.deadLetters,
- majorityMinCap)
+ .proxyProps(
+ typeName = typeName,
+ dataCenter = dataCenter,
+ settings = settings,
+ coordinatorPath = cPath,
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ replicator = context.system.deadLetters,
+ majorityMinCap)
.withDispatcher(context.props.dispatcher),
name = actorName)
}
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala
index 5f03cc55e7..8ba17954b6 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala
@@ -59,14 +59,15 @@ object ClusterShardingSettings {
if (config.getString("passivate-idle-entity-after").toLowerCase == "off") Duration.Zero
else config.getDuration("passivate-idle-entity-after", MILLISECONDS).millis
- new ClusterShardingSettings(role = roleOption(config.getString("role")),
- rememberEntities = config.getBoolean("remember-entities"),
- journalPluginId = config.getString("journal-plugin-id"),
- snapshotPluginId = config.getString("snapshot-plugin-id"),
- stateStoreMode = config.getString("state-store-mode"),
- passivateIdleEntityAfter = passivateIdleAfter,
- tuningParameters,
- coordinatorSingletonSettings)
+ new ClusterShardingSettings(
+ role = roleOption(config.getString("role")),
+ rememberEntities = config.getBoolean("remember-entities"),
+ journalPluginId = config.getString("journal-plugin-id"),
+ snapshotPluginId = config.getString("snapshot-plugin-id"),
+ stateStoreMode = config.getString("state-store-mode"),
+ passivateIdleEntityAfter = passivateIdleAfter,
+ tuningParameters,
+ coordinatorSingletonSettings)
}
/**
@@ -87,93 +88,99 @@ object ClusterShardingSettings {
private[akka] def roleOption(role: String): Option[String] =
if (role == "") None else Option(role)
- class TuningParameters(val coordinatorFailureBackoff: FiniteDuration,
- val retryInterval: FiniteDuration,
- val bufferSize: Int,
- val handOffTimeout: FiniteDuration,
- val shardStartTimeout: FiniteDuration,
- val shardFailureBackoff: FiniteDuration,
- val entityRestartBackoff: FiniteDuration,
- val rebalanceInterval: FiniteDuration,
- val snapshotAfter: Int,
- val keepNrOfBatches: Int,
- val leastShardAllocationRebalanceThreshold: Int,
- val leastShardAllocationMaxSimultaneousRebalance: Int,
- val waitingForStateTimeout: FiniteDuration,
- val updatingStateTimeout: FiniteDuration,
- val entityRecoveryStrategy: String,
- val entityRecoveryConstantRateStrategyFrequency: FiniteDuration,
- val entityRecoveryConstantRateStrategyNumberOfEntities: Int) {
+ class TuningParameters(
+ val coordinatorFailureBackoff: FiniteDuration,
+ val retryInterval: FiniteDuration,
+ val bufferSize: Int,
+ val handOffTimeout: FiniteDuration,
+ val shardStartTimeout: FiniteDuration,
+ val shardFailureBackoff: FiniteDuration,
+ val entityRestartBackoff: FiniteDuration,
+ val rebalanceInterval: FiniteDuration,
+ val snapshotAfter: Int,
+ val keepNrOfBatches: Int,
+ val leastShardAllocationRebalanceThreshold: Int,
+ val leastShardAllocationMaxSimultaneousRebalance: Int,
+ val waitingForStateTimeout: FiniteDuration,
+ val updatingStateTimeout: FiniteDuration,
+ val entityRecoveryStrategy: String,
+ val entityRecoveryConstantRateStrategyFrequency: FiniteDuration,
+ val entityRecoveryConstantRateStrategyNumberOfEntities: Int) {
- require(entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant",
- s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'")
+ require(
+ entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant",
+ s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'")
// included for binary compatibility
- def this(coordinatorFailureBackoff: FiniteDuration,
- retryInterval: FiniteDuration,
- bufferSize: Int,
- handOffTimeout: FiniteDuration,
- shardStartTimeout: FiniteDuration,
- shardFailureBackoff: FiniteDuration,
- entityRestartBackoff: FiniteDuration,
- rebalanceInterval: FiniteDuration,
- snapshotAfter: Int,
- leastShardAllocationRebalanceThreshold: Int,
- leastShardAllocationMaxSimultaneousRebalance: Int,
- waitingForStateTimeout: FiniteDuration,
- updatingStateTimeout: FiniteDuration,
- entityRecoveryStrategy: String,
- entityRecoveryConstantRateStrategyFrequency: FiniteDuration,
- entityRecoveryConstantRateStrategyNumberOfEntities: Int) = {
- this(coordinatorFailureBackoff,
- retryInterval,
- bufferSize,
- handOffTimeout,
- shardStartTimeout,
- shardFailureBackoff,
- entityRestartBackoff,
- rebalanceInterval,
- snapshotAfter,
- 2,
- leastShardAllocationRebalanceThreshold,
- leastShardAllocationMaxSimultaneousRebalance,
- waitingForStateTimeout,
- updatingStateTimeout,
- entityRecoveryStrategy,
- entityRecoveryConstantRateStrategyFrequency,
- entityRecoveryConstantRateStrategyNumberOfEntities)
+ def this(
+ coordinatorFailureBackoff: FiniteDuration,
+ retryInterval: FiniteDuration,
+ bufferSize: Int,
+ handOffTimeout: FiniteDuration,
+ shardStartTimeout: FiniteDuration,
+ shardFailureBackoff: FiniteDuration,
+ entityRestartBackoff: FiniteDuration,
+ rebalanceInterval: FiniteDuration,
+ snapshotAfter: Int,
+ leastShardAllocationRebalanceThreshold: Int,
+ leastShardAllocationMaxSimultaneousRebalance: Int,
+ waitingForStateTimeout: FiniteDuration,
+ updatingStateTimeout: FiniteDuration,
+ entityRecoveryStrategy: String,
+ entityRecoveryConstantRateStrategyFrequency: FiniteDuration,
+ entityRecoveryConstantRateStrategyNumberOfEntities: Int) = {
+ this(
+ coordinatorFailureBackoff,
+ retryInterval,
+ bufferSize,
+ handOffTimeout,
+ shardStartTimeout,
+ shardFailureBackoff,
+ entityRestartBackoff,
+ rebalanceInterval,
+ snapshotAfter,
+ 2,
+ leastShardAllocationRebalanceThreshold,
+ leastShardAllocationMaxSimultaneousRebalance,
+ waitingForStateTimeout,
+ updatingStateTimeout,
+ entityRecoveryStrategy,
+ entityRecoveryConstantRateStrategyFrequency,
+ entityRecoveryConstantRateStrategyNumberOfEntities)
}
// included for binary compatibility
- def this(coordinatorFailureBackoff: FiniteDuration,
- retryInterval: FiniteDuration,
- bufferSize: Int,
- handOffTimeout: FiniteDuration,
- shardStartTimeout: FiniteDuration,
- shardFailureBackoff: FiniteDuration,
- entityRestartBackoff: FiniteDuration,
- rebalanceInterval: FiniteDuration,
- snapshotAfter: Int,
- leastShardAllocationRebalanceThreshold: Int,
- leastShardAllocationMaxSimultaneousRebalance: Int,
- waitingForStateTimeout: FiniteDuration,
- updatingStateTimeout: FiniteDuration) = {
- this(coordinatorFailureBackoff,
- retryInterval,
- bufferSize,
- handOffTimeout,
- shardStartTimeout,
- shardFailureBackoff,
- entityRestartBackoff,
- rebalanceInterval,
- snapshotAfter,
- leastShardAllocationRebalanceThreshold,
- leastShardAllocationMaxSimultaneousRebalance,
- waitingForStateTimeout,
- updatingStateTimeout,
- "all",
- 100.milliseconds,
- 5)
+ def this(
+ coordinatorFailureBackoff: FiniteDuration,
+ retryInterval: FiniteDuration,
+ bufferSize: Int,
+ handOffTimeout: FiniteDuration,
+ shardStartTimeout: FiniteDuration,
+ shardFailureBackoff: FiniteDuration,
+ entityRestartBackoff: FiniteDuration,
+ rebalanceInterval: FiniteDuration,
+ snapshotAfter: Int,
+ leastShardAllocationRebalanceThreshold: Int,
+ leastShardAllocationMaxSimultaneousRebalance: Int,
+ waitingForStateTimeout: FiniteDuration,
+ updatingStateTimeout: FiniteDuration) = {
+ this(
+ coordinatorFailureBackoff,
+ retryInterval,
+ bufferSize,
+ handOffTimeout,
+ shardStartTimeout,
+ shardFailureBackoff,
+ entityRestartBackoff,
+ rebalanceInterval,
+ snapshotAfter,
+ leastShardAllocationRebalanceThreshold,
+ leastShardAllocationMaxSimultaneousRebalance,
+ waitingForStateTimeout,
+ updatingStateTimeout,
+ "all",
+ 100.milliseconds,
+ 5)
}
}
@@ -198,39 +205,43 @@ object ClusterShardingSettings {
* Use 0 to disable automatic passivation.
* @param tuningParameters additional tuning parameters, see descriptions in reference.conf
*/
-final class ClusterShardingSettings(val role: Option[String],
- val rememberEntities: Boolean,
- val journalPluginId: String,
- val snapshotPluginId: String,
- val stateStoreMode: String,
- val passivateIdleEntityAfter: FiniteDuration,
- val tuningParameters: ClusterShardingSettings.TuningParameters,
- val coordinatorSingletonSettings: ClusterSingletonManagerSettings)
+final class ClusterShardingSettings(
+ val role: Option[String],
+ val rememberEntities: Boolean,
+ val journalPluginId: String,
+ val snapshotPluginId: String,
+ val stateStoreMode: String,
+ val passivateIdleEntityAfter: FiniteDuration,
+ val tuningParameters: ClusterShardingSettings.TuningParameters,
+ val coordinatorSingletonSettings: ClusterSingletonManagerSettings)
extends NoSerializationVerificationNeeded {
// included for binary compatibility reasons
@deprecated(
"Use the ClusterShardingSettings factory methods or the constructor including passivateIdleEntityAfter instead",
"2.5.18")
- def this(role: Option[String],
- rememberEntities: Boolean,
- journalPluginId: String,
- snapshotPluginId: String,
- stateStoreMode: String,
- tuningParameters: ClusterShardingSettings.TuningParameters,
- coordinatorSingletonSettings: ClusterSingletonManagerSettings) =
- this(role,
- rememberEntities,
- journalPluginId,
- snapshotPluginId,
- stateStoreMode,
- Duration.Zero,
- tuningParameters,
- coordinatorSingletonSettings)
+ def this(
+ role: Option[String],
+ rememberEntities: Boolean,
+ journalPluginId: String,
+ snapshotPluginId: String,
+ stateStoreMode: String,
+ tuningParameters: ClusterShardingSettings.TuningParameters,
+ coordinatorSingletonSettings: ClusterSingletonManagerSettings) =
+ this(
+ role,
+ rememberEntities,
+ journalPluginId,
+ snapshotPluginId,
+ stateStoreMode,
+ Duration.Zero,
+ tuningParameters,
+ coordinatorSingletonSettings)
import ClusterShardingSettings.{ StateStoreModeDData, StateStoreModePersistence }
- require(stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData,
- s"Unknown 'state-store-mode' [$stateStoreMode], valid values are '$StateStoreModeDData' or '$StateStoreModePersistence'")
+ require(
+ stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData,
+ s"Unknown 'state-store-mode' [$stateStoreMode], valid values are '$StateStoreModeDData' or '$StateStoreModePersistence'")
/** If true, this node should run the shard region, otherwise just a shard proxy should started on this node. */
@InternalApi
@@ -270,21 +281,23 @@ final class ClusterShardingSettings(val role: Option[String],
coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings =
copy(coordinatorSingletonSettings = coordinatorSingletonSettings)
- private def copy(role: Option[String] = role,
- rememberEntities: Boolean = rememberEntities,
- journalPluginId: String = journalPluginId,
- snapshotPluginId: String = snapshotPluginId,
- stateStoreMode: String = stateStoreMode,
- passivateIdleAfter: FiniteDuration = passivateIdleEntityAfter,
- tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters,
- coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings)
+ private def copy(
+ role: Option[String] = role,
+ rememberEntities: Boolean = rememberEntities,
+ journalPluginId: String = journalPluginId,
+ snapshotPluginId: String = snapshotPluginId,
+ stateStoreMode: String = stateStoreMode,
+ passivateIdleAfter: FiniteDuration = passivateIdleEntityAfter,
+ tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters,
+ coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings)
: ClusterShardingSettings =
- new ClusterShardingSettings(role,
- rememberEntities,
- journalPluginId,
- snapshotPluginId,
- stateStoreMode,
- passivateIdleAfter,
- tuningParameters,
- coordinatorSingletonSettings)
+ new ClusterShardingSettings(
+ role,
+ rememberEntities,
+ journalPluginId,
+ snapshotPluginId,
+ stateStoreMode,
+ passivateIdleAfter,
+ tuningParameters,
+ coordinatorSingletonSettings)
}
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala
index 6caf52312e..a9e297c3e9 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala
@@ -81,11 +81,12 @@ object RemoveInternalClusterShardingData {
* API corresponding to the [[#main]] method as described in the
* [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]]
*/
- def remove(system: ActorSystem,
- journalPluginId: String,
- typeNames: Set[String],
- terminateSystem: Boolean,
- remove2dot3Data: Boolean): Future[Unit] = {
+ def remove(
+ system: ActorSystem,
+ journalPluginId: String,
+ typeNames: Set[String],
+ terminateSystem: Boolean,
+ remove2dot3Data: Boolean): Future[Unit] = {
val resolvedJournalPluginId =
if (journalPluginId == "") system.settings.config.getString("akka.persistence.journal.plugin")
@@ -96,18 +97,20 @@ object RemoveInternalClusterShardingData {
}
val completion = Promise[Unit]()
- system.actorOf(props(journalPluginId, typeNames, completion, remove2dot3Data),
- name = "removeInternalClusterShardingData")
+ system.actorOf(
+ props(journalPluginId, typeNames, completion, remove2dot3Data),
+ name = "removeInternalClusterShardingData")
completion.future
}
/**
* INTERNAL API: `Props` for [[RemoveInternalClusterShardingData]] actor.
*/
- private[akka] def props(journalPluginId: String,
- typeNames: Set[String],
- completion: Promise[Unit],
- remove2dot3Data: Boolean): Props =
+ private[akka] def props(
+ journalPluginId: String,
+ typeNames: Set[String],
+ completion: Promise[Unit],
+ remove2dot3Data: Boolean): Props =
Props(new RemoveInternalClusterShardingData(journalPluginId, typeNames, completion, remove2dot3Data))
.withDeploy(Deploy.local)
@@ -127,9 +130,10 @@ object RemoveInternalClusterShardingData {
* `persistenceId`. It will reply with `RemoveOnePersistenceId.Result`
* when done.
*/
- private[akka] class RemoveOnePersistenceId(override val journalPluginId: String,
- override val persistenceId: String,
- replyTo: ActorRef)
+ private[akka] class RemoveOnePersistenceId(
+ override val journalPluginId: String,
+ override val persistenceId: String,
+ replyTo: ActorRef)
extends PersistentActor {
import RemoveInternalClusterShardingData.RemoveOnePersistenceId._
@@ -189,10 +193,11 @@ object RemoveInternalClusterShardingData {
/**
* @see [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]]
*/
-class RemoveInternalClusterShardingData(journalPluginId: String,
- typeNames: Set[String],
- completion: Promise[Unit],
- remove2dot3Data: Boolean)
+class RemoveInternalClusterShardingData(
+ journalPluginId: String,
+ typeNames: Set[String],
+ completion: Promise[Unit],
+ remove2dot3Data: Boolean)
extends Actor
with ActorLogging {
import RemoveInternalClusterShardingData._
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala
index f972b414b0..3d155083af 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala
@@ -95,35 +95,38 @@ private[akka] object Shard {
* If `settings.rememberEntities` is enabled the `PersistentShard`
* subclass is used, otherwise `Shard`.
*/
- def props(typeName: String,
- shardId: ShardRegion.ShardId,
- entityProps: String => Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- handOffStopMessage: Any,
- replicator: ActorRef,
- majorityMinCap: Int): Props = {
+ def props(
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ entityProps: String => Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ handOffStopMessage: Any,
+ replicator: ActorRef,
+ majorityMinCap: Int): Props = {
if (settings.rememberEntities && settings.stateStoreMode == ClusterShardingSettings.StateStoreModeDData) {
Props(
- new DDataShard(typeName,
- shardId,
- entityProps,
- settings,
- extractEntityId,
- extractShardId,
- handOffStopMessage,
- replicator,
- majorityMinCap)).withDeploy(Deploy.local)
+ new DDataShard(
+ typeName,
+ shardId,
+ entityProps,
+ settings,
+ extractEntityId,
+ extractShardId,
+ handOffStopMessage,
+ replicator,
+ majorityMinCap)).withDeploy(Deploy.local)
} else if (settings.rememberEntities && settings.stateStoreMode == ClusterShardingSettings.StateStoreModePersistence)
Props(
- new PersistentShard(typeName,
- shardId,
- entityProps,
- settings,
- extractEntityId,
- extractShardId,
- handOffStopMessage)).withDeploy(Deploy.local)
+ new PersistentShard(
+ typeName,
+ shardId,
+ entityProps,
+ settings,
+ extractEntityId,
+ extractShardId,
+ handOffStopMessage)).withDeploy(Deploy.local)
else
Props(new Shard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage))
.withDeploy(Deploy.local)
@@ -141,13 +144,14 @@ private[akka] object Shard {
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
-private[akka] class Shard(typeName: String,
- shardId: ShardRegion.ShardId,
- entityProps: String => Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- handOffStopMessage: Any)
+private[akka] class Shard(
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ entityProps: String => Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ handOffStopMessage: Any)
extends Actor
with ActorLogging {
@@ -207,10 +211,11 @@ private[akka] class Shard(typeName: String,
if (passivateIdleTask.isDefined) {
lastMessageTimestamp = lastMessageTimestamp.updated(start.entityId, System.nanoTime())
}
- getOrCreateEntity(start.entityId,
- _ =>
- processChange(EntityStarted(start.entityId))(_ =>
- requester ! ShardRegion.StartEntityAck(start.entityId, shardId)))
+ getOrCreateEntity(
+ start.entityId,
+ _ =>
+ processChange(EntityStarted(start.entityId))(_ =>
+ requester ! ShardRegion.StartEntityAck(start.entityId, shardId)))
}
def receiveStartEntityAck(ack: ShardRegion.StartEntityAck): Unit = {
@@ -394,12 +399,13 @@ private[akka] class Shard(typeName: String,
}
private[akka] object RememberEntityStarter {
- def props(region: ActorRef,
- typeName: String,
- shardId: ShardRegion.ShardId,
- ids: Set[ShardRegion.EntityId],
- settings: ClusterShardingSettings,
- requestor: ActorRef) =
+ def props(
+ region: ActorRef,
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ ids: Set[ShardRegion.EntityId],
+ settings: ClusterShardingSettings,
+ requestor: ActorRef) =
Props(new RememberEntityStarter(region, typeName, shardId, ids, settings, requestor))
private case object Tick extends NoSerializationVerificationNeeded
@@ -408,12 +414,13 @@ private[akka] object RememberEntityStarter {
/**
* INTERNAL API: Actor responsible for starting entities when rememberEntities is enabled
*/
-private[akka] class RememberEntityStarter(region: ActorRef,
- typeName: String,
- shardId: ShardRegion.ShardId,
- ids: Set[ShardRegion.EntityId],
- settings: ClusterShardingSettings,
- requestor: ActorRef)
+private[akka] class RememberEntityStarter(
+ region: ActorRef,
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ ids: Set[ShardRegion.EntityId],
+ settings: ClusterShardingSettings,
+ requestor: ActorRef)
extends Actor
with ActorLogging {
@@ -469,9 +476,10 @@ private[akka] trait RememberingShard {
entityRecoveryStrategy match {
case "all" => EntityRecoveryStrategy.allStrategy()
case "constant" =>
- EntityRecoveryStrategy.constantStrategy(context.system,
- entityRecoveryConstantRateStrategyFrequency,
- entityRecoveryConstantRateStrategyNumberOfEntities)
+ EntityRecoveryStrategy.constantStrategy(
+ context.system,
+ entityRecoveryConstantRateStrategyFrequency,
+ entityRecoveryConstantRateStrategyNumberOfEntities)
}
}
@@ -533,13 +541,14 @@ private[akka] trait RememberingShard {
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
-private[akka] class PersistentShard(typeName: String,
- shardId: ShardRegion.ShardId,
- entityProps: String => Props,
- override val settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- handOffStopMessage: Any)
+private[akka] class PersistentShard(
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ entityProps: String => Props,
+ override val settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ handOffStopMessage: Any)
extends Shard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage)
with RememberingShard
with PersistentActor
@@ -593,10 +602,11 @@ private[akka] class PersistentShard(typeName: String,
case DeleteMessagesSuccess(toSequenceNr) =>
val deleteTo = toSequenceNr - 1
val deleteFrom = math.max(0, deleteTo - (keepNrOfBatches * snapshotAfter))
- log.debug("PersistentShard messages to [{}] deleted successfully. Deleting snapshots from [{}] to [{}]",
- toSequenceNr,
- deleteFrom,
- deleteTo)
+ log.debug(
+ "PersistentShard messages to [{}] deleted successfully. Deleting snapshots from [{}] to [{}]",
+ toSequenceNr,
+ deleteFrom,
+ deleteTo)
deleteSnapshots(SnapshotSelectionCriteria(minSequenceNr = deleteFrom, maxSequenceNr = deleteTo))
case DeleteMessagesFailure(reason, toSequenceNr) =>
@@ -621,15 +631,16 @@ private[akka] class PersistentShard(typeName: String,
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
-private[akka] class DDataShard(typeName: String,
- shardId: ShardRegion.ShardId,
- entityProps: String => Props,
- override val settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- handOffStopMessage: Any,
- replicator: ActorRef,
- majorityMinCap: Int)
+private[akka] class DDataShard(
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ entityProps: String => Props,
+ override val settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ handOffStopMessage: Any,
+ replicator: ActorRef,
+ majorityMinCap: Int)
extends Shard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage)
with RememberingShard
with Stash
@@ -691,8 +702,9 @@ private[akka] class DDataShard(typeName: String,
receiveOne(i)
case GetFailure(_, _) =>
- log.error("The DDataShard was unable to get an initial state within 'waiting-for-state-timeout': {} millis",
- waitingForStateTimeout.toMillis)
+ log.error(
+ "The DDataShard was unable to get an initial state within 'waiting-for-state-timeout': {} millis",
+ waitingForStateTimeout.toMillis)
// parent ShardRegion supervisor will notice that it terminated and will start it again, after backoff
context.stop(self)
@@ -755,10 +767,11 @@ private[akka] class DDataShard(typeName: String,
}
case ModifyFailure(_, error, cause, Some((`evt`, _))) =>
- log.error(cause,
- "The DDataShard was unable to update state with error {} and event {}. Shard will be restarted",
- error,
- evt)
+ log.error(
+ cause,
+ "The DDataShard was unable to update state with error {} and event {}. Shard will be restarted",
+ error,
+ evt)
throw cause
case _ => stash()
@@ -769,9 +782,10 @@ private[akka] class DDataShard(typeName: String,
object EntityRecoveryStrategy {
def allStrategy(): EntityRecoveryStrategy = new AllAtOnceEntityRecoveryStrategy()
- def constantStrategy(actorSystem: ActorSystem,
- frequency: FiniteDuration,
- numberOfEntities: Int): EntityRecoveryStrategy =
+ def constantStrategy(
+ actorSystem: ActorSystem,
+ frequency: FiniteDuration,
+ numberOfEntities: Int): EntityRecoveryStrategy =
new ConstantRateEntityRecoveryStrategy(actorSystem, frequency, numberOfEntities)
}
@@ -791,9 +805,10 @@ final class AllAtOnceEntityRecoveryStrategy extends EntityRecoveryStrategy {
if (entities.isEmpty) Set.empty else Set(Future.successful(entities))
}
-final class ConstantRateEntityRecoveryStrategy(actorSystem: ActorSystem,
- frequency: FiniteDuration,
- numberOfEntities: Int)
+final class ConstantRateEntityRecoveryStrategy(
+ actorSystem: ActorSystem,
+ frequency: FiniteDuration,
+ numberOfEntities: Int)
extends EntityRecoveryStrategy {
import ShardRegion.EntityId
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
index 878c5adfc3..a739b7cbb1 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
@@ -38,27 +38,30 @@ object ShardCoordinator {
* INTERNAL API
* Factory method for the [[akka.actor.Props]] of the [[ShardCoordinator]] actor.
*/
- private[akka] def props(typeName: String,
- settings: ClusterShardingSettings,
- allocationStrategy: ShardAllocationStrategy): Props =
+ private[akka] def props(
+ typeName: String,
+ settings: ClusterShardingSettings,
+ allocationStrategy: ShardAllocationStrategy): Props =
Props(new PersistentShardCoordinator(typeName: String, settings, allocationStrategy)).withDeploy(Deploy.local)
/**
* INTERNAL API
* Factory method for the [[akka.actor.Props]] of the [[ShardCoordinator]] actor with state based on ddata.
*/
- private[akka] def props(typeName: String,
- settings: ClusterShardingSettings,
- allocationStrategy: ShardAllocationStrategy,
- replicator: ActorRef,
- majorityMinCap: Int): Props =
+ private[akka] def props(
+ typeName: String,
+ settings: ClusterShardingSettings,
+ allocationStrategy: ShardAllocationStrategy,
+ replicator: ActorRef,
+ majorityMinCap: Int): Props =
Props(
- new DDataShardCoordinator(typeName: String,
- settings,
- allocationStrategy,
- replicator,
- majorityMinCap,
- settings.rememberEntities)).withDeploy(Deploy.local)
+ new DDataShardCoordinator(
+ typeName: String,
+ settings,
+ allocationStrategy,
+ replicator,
+ majorityMinCap,
+ settings.rememberEntities)).withDeploy(Deploy.local)
/**
* Interface of the pluggable shard allocation and rebalancing logic used by the [[ShardCoordinator]].
@@ -77,9 +80,10 @@ object ShardCoordinator {
* @return a `Future` of the actor ref of the [[ShardRegion]] that is to be responsible for the shard, must be one of
* the references included in the `currentShardAllocations` parameter
*/
- def allocateShard(requester: ActorRef,
- shardId: ShardId,
- currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef]
+ def allocateShard(
+ requester: ActorRef,
+ shardId: ShardId,
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef]
/**
* Invoked periodically to decide which shards to rebalance to another location.
@@ -89,8 +93,9 @@ object ShardCoordinator {
* you should not include these in the returned set
* @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round
*/
- def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
- rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]]
+ def rebalance(
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
+ rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]]
}
/**
@@ -107,8 +112,9 @@ object ShardCoordinator {
allocateShard(requester, shardId, currentShardAllocations.asJava)
}
- override final def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
- rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
+ override final def rebalance(
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
+ rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
import scala.collection.JavaConverters._
implicit val ec = ExecutionContexts.sameThreadExecutionContext
rebalance(currentShardAllocations.asJava, rebalanceInProgress.asJava).map(_.asScala.toSet)
@@ -124,9 +130,10 @@ object ShardCoordinator {
* @return a `Future` of the actor ref of the [[ShardRegion]] that is to be responsible for the shard, must be one of
* the references included in the `currentShardAllocations` parameter
*/
- def allocateShard(requester: ActorRef,
- shardId: String,
- currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]]): Future[ActorRef]
+ def allocateShard(
+ requester: ActorRef,
+ shardId: String,
+ currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]]): Future[ActorRef]
/**
* Invoked periodically to decide which shards to rebalance to another location.
@@ -136,8 +143,9 @@ object ShardCoordinator {
* you should not include these in the returned set
* @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round
*/
- def rebalance(currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]],
- rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]]
+ def rebalance(
+ currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]],
+ rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]]
}
private val emptyRebalanceResult = Future.successful(Set.empty[ShardId])
@@ -179,8 +187,9 @@ object ShardCoordinator {
Future.successful(regionWithLeastShards)
}
- override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
- rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
+ override def rebalance(
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
+ rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
if (rebalanceInProgress.size < maxSimultaneousRebalance) {
val (regionWithLeastShards, leastShards) = currentShardAllocations.minBy { case (_, v) => v.size }
val mostShards = currentShardAllocations
@@ -190,8 +199,9 @@ object ShardCoordinator {
.maxBy(_.size)
val difference = mostShards.size - leastShards.size
if (difference > rebalanceThreshold) {
- val n = math.min(math.min(difference - rebalanceThreshold, rebalanceThreshold),
- maxSimultaneousRebalance - rebalanceInProgress.size)
+ val n = math.min(
+ math.min(difference - rebalanceThreshold, rebalanceThreshold),
+ maxSimultaneousRebalance - rebalanceInProgress.size)
Future.successful(mostShards.sorted.take(n).toSet)
} else
emptyRebalanceResult
@@ -310,13 +320,13 @@ object ShardCoordinator {
* Persistent state of the event sourced ShardCoordinator.
*/
@SerialVersionUID(1L) final case class State private[akka] (
- // region for each shard
- shards: Map[ShardId, ActorRef] = Map.empty,
- // shards for each region
- regions: Map[ActorRef, Vector[ShardId]] = Map.empty,
- regionProxies: Set[ActorRef] = Set.empty,
- unallocatedShards: Set[ShardId] = Set.empty,
- rememberEntities: Boolean = false)
+ // region for each shard
+ shards: Map[ShardId, ActorRef] = Map.empty,
+ // shards for each region
+ regions: Map[ActorRef, Vector[ShardId]] = Map.empty,
+ regionProxies: Set[ActorRef] = Set.empty,
+ unallocatedShards: Set[ShardId] = Set.empty,
+ rememberEntities: Boolean = false)
extends ClusterShardingSerializable {
def withRememberEntities(enabled: Boolean): State = {
@@ -351,18 +361,20 @@ object ShardCoordinator {
require(!shards.contains(shard), s"Shard [$shard] already allocated: $this")
val newUnallocatedShards =
if (rememberEntities) (unallocatedShards - shard) else unallocatedShards
- copy(shards = shards.updated(shard, region),
- regions = regions.updated(region, regions(region) :+ shard),
- unallocatedShards = newUnallocatedShards)
+ copy(
+ shards = shards.updated(shard, region),
+ regions = regions.updated(region, regions(region) :+ shard),
+ unallocatedShards = newUnallocatedShards)
case ShardHomeDeallocated(shard) =>
require(shards.contains(shard), s"Shard [$shard] not allocated: $this")
val region = shards(shard)
require(regions.contains(region), s"Region $region for shard [$shard] not registered: $this")
val newUnallocatedShards =
if (rememberEntities) (unallocatedShards + shard) else unallocatedShards
- copy(shards = shards - shard,
- regions = regions.updated(region, regions(region).filterNot(_ == shard)),
- unallocatedShards = newUnallocatedShards)
+ copy(
+ shards = shards - shard,
+ regions = regions.updated(region, regions(region).filterNot(_ == shard)),
+ unallocatedShards = newUnallocatedShards)
}
}
@@ -388,9 +400,10 @@ object ShardCoordinator {
/**
* Result of `allocateShard` is piped to self with this message.
*/
- private final case class AllocateShardResult(shard: ShardId,
- shardRegion: Option[ActorRef],
- getShardHomeSender: ActorRef)
+ private final case class AllocateShardResult(
+ shard: ShardId,
+ shardRegion: Option[ActorRef],
+ getShardHomeSender: ActorRef)
/**
* Result of `rebalance` is piped to self with this message.
@@ -405,10 +418,11 @@ object ShardCoordinator {
* parent `ShardCoordinator`. If the process takes longer than the
* `handOffTimeout` it also sends [[akka.cluster.sharding.RebalanceDone]].
*/
- private[akka] class RebalanceWorker(shard: String,
- from: ActorRef,
- handOffTimeout: FiniteDuration,
- regions: Set[ActorRef])
+ private[akka] class RebalanceWorker(
+ shard: String,
+ from: ActorRef,
+ handOffTimeout: FiniteDuration,
+ regions: Set[ActorRef])
extends Actor {
import Internal._
regions.foreach(_ ! BeginHandOff(shard))
@@ -438,10 +452,11 @@ object ShardCoordinator {
}
}
- private[akka] def rebalanceWorkerProps(shard: String,
- from: ActorRef,
- handOffTimeout: FiniteDuration,
- regions: Set[ActorRef]): Props =
+ private[akka] def rebalanceWorkerProps(
+ shard: String,
+ from: ActorRef,
+ handOffTimeout: FiniteDuration,
+ regions: Set[ActorRef]): Props =
Props(new RebalanceWorker(shard, from, handOffTimeout, regions))
}
@@ -451,9 +466,10 @@ object ShardCoordinator {
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
-abstract class ShardCoordinator(typeName: String,
- settings: ClusterShardingSettings,
- allocationStrategy: ShardCoordinator.ShardAllocationStrategy)
+abstract class ShardCoordinator(
+ typeName: String,
+ settings: ClusterShardingSettings,
+ allocationStrategy: ShardCoordinator.ShardAllocationStrategy)
extends Actor
with ActorLogging {
import ShardCoordinator._
@@ -687,10 +703,11 @@ abstract class ShardCoordinator(typeName: String,
}
private def deferGetShardHomeRequest(shard: ShardId, from: ActorRef): Unit = {
- log.debug("GetShardHome [{}] request from [{}] deferred, because rebalance is in progress for this shard. " +
- "It will be handled when rebalance is done.",
- shard,
- from)
+ log.debug(
+ "GetShardHome [{}] request from [{}] deferred, because rebalance is in progress for this shard. " +
+ "It will be handled when rebalance is done.",
+ shard,
+ from)
rebalanceInProgress = rebalanceInProgress.updated(shard, rebalanceInProgress(shard) + from)
}
@@ -837,10 +854,11 @@ abstract class ShardCoordinator(typeName: String,
getShardHomeSender ! ShardHome(evt.shard, evt.region)
}
} else
- log.debug("Allocated region {} for shard [{}] is not (any longer) one of the registered regions: {}",
- region,
- shard,
- state)
+ log.debug(
+ "Allocated region {} for shard [{}] is not (any longer) one of the registered regions: {}",
+ region,
+ shard,
+ state)
}
}
@@ -871,9 +889,10 @@ abstract class ShardCoordinator(typeName: String,
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
-class PersistentShardCoordinator(typeName: String,
- settings: ClusterShardingSettings,
- allocationStrategy: ShardCoordinator.ShardAllocationStrategy)
+class PersistentShardCoordinator(
+ typeName: String,
+ settings: ClusterShardingSettings,
+ allocationStrategy: ShardCoordinator.ShardAllocationStrategy)
extends ShardCoordinator(typeName, settings, allocationStrategy)
with PersistentActor {
import ShardCoordinator.Internal._
@@ -975,12 +994,13 @@ class PersistentShardCoordinator(typeName: String,
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
-class DDataShardCoordinator(typeName: String,
- settings: ClusterShardingSettings,
- allocationStrategy: ShardCoordinator.ShardAllocationStrategy,
- replicator: ActorRef,
- majorityMinCap: Int,
- rememberEntities: Boolean)
+class DDataShardCoordinator(
+ typeName: String,
+ settings: ClusterShardingSettings,
+ allocationStrategy: ShardCoordinator.ShardAllocationStrategy,
+ replicator: ActorRef,
+ majorityMinCap: Int,
+ rememberEntities: Boolean)
extends ShardCoordinator(typeName, settings, allocationStrategy)
with Stash {
import ShardCoordinator.Internal._
@@ -1083,9 +1103,10 @@ class DDataShardCoordinator(typeName: String,
}
// this state will stash all messages until it receives UpdateSuccess
- def waitingForUpdate[E <: DomainEvent](evt: E,
- afterUpdateCallback: E => Unit,
- remainingKeys: Set[Key[ReplicatedData]]): Receive = {
+ def waitingForUpdate[E <: DomainEvent](
+ evt: E,
+ afterUpdateCallback: E => Unit,
+ remainingKeys: Set[Key[ReplicatedData]]): Receive = {
case UpdateSuccess(CoordinatorStateKey, Some(`evt`)) =>
log.debug("The coordinator state was successfully updated with {}", evt)
val newRemainingKeys = remainingKeys - CoordinatorStateKey
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala
index a7e5674f0d..57af950a85 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala
@@ -34,51 +34,55 @@ object ShardRegion {
* INTERNAL API
* Factory method for the [[akka.actor.Props]] of the [[ShardRegion]] actor.
*/
- private[akka] def props(typeName: String,
- entityProps: String => Props,
- settings: ClusterShardingSettings,
- coordinatorPath: String,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- handOffStopMessage: Any,
- replicator: ActorRef,
- majorityMinCap: Int): Props =
+ private[akka] def props(
+ typeName: String,
+ entityProps: String => Props,
+ settings: ClusterShardingSettings,
+ coordinatorPath: String,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ handOffStopMessage: Any,
+ replicator: ActorRef,
+ majorityMinCap: Int): Props =
Props(
- new ShardRegion(typeName,
- Some(entityProps),
- dataCenter = None,
- settings,
- coordinatorPath,
- extractEntityId,
- extractShardId,
- handOffStopMessage,
- replicator,
- majorityMinCap)).withDeploy(Deploy.local)
+ new ShardRegion(
+ typeName,
+ Some(entityProps),
+ dataCenter = None,
+ settings,
+ coordinatorPath,
+ extractEntityId,
+ extractShardId,
+ handOffStopMessage,
+ replicator,
+ majorityMinCap)).withDeploy(Deploy.local)
/**
* INTERNAL API
* Factory method for the [[akka.actor.Props]] of the [[ShardRegion]] actor
* when using it in proxy only mode.
*/
- private[akka] def proxyProps(typeName: String,
- dataCenter: Option[DataCenter],
- settings: ClusterShardingSettings,
- coordinatorPath: String,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- replicator: ActorRef,
- majorityMinCap: Int): Props =
+ private[akka] def proxyProps(
+ typeName: String,
+ dataCenter: Option[DataCenter],
+ settings: ClusterShardingSettings,
+ coordinatorPath: String,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ replicator: ActorRef,
+ majorityMinCap: Int): Props =
Props(
- new ShardRegion(typeName,
- None,
- dataCenter,
- settings,
- coordinatorPath,
- extractEntityId,
- extractShardId,
- PoisonPill,
- replicator,
- majorityMinCap)).withDeploy(Deploy.local)
+ new ShardRegion(
+ typeName,
+ None,
+ dataCenter,
+ settings,
+ coordinatorPath,
+ extractEntityId,
+ extractShardId,
+ PoisonPill,
+ replicator,
+ majorityMinCap)).withDeploy(Deploy.local)
/**
* Marker type of entity identifier (`String`).
@@ -366,11 +370,12 @@ object ShardRegion {
* them have terminated it replies with `ShardStopped`.
* If the entities don't terminate after `handoffTimeout` it will try stopping them forcefully.
*/
- private[akka] class HandOffStopper(shard: String,
- replyTo: ActorRef,
- entities: Set[ActorRef],
- stopMessage: Any,
- handoffTimeout: FiniteDuration)
+ private[akka] class HandOffStopper(
+ shard: String,
+ replyTo: ActorRef,
+ entities: Set[ActorRef],
+ stopMessage: Any,
+ handoffTimeout: FiniteDuration)
extends Actor
with ActorLogging {
import ShardCoordinator.Internal.ShardStopped
@@ -386,10 +391,11 @@ object ShardRegion {
def receive = {
case ReceiveTimeout =>
- log.warning("HandOffStopMessage[{}] is not handled by some of the entities of the `{}` shard, " +
- "stopping the remaining entities.",
- stopMessage.getClass.getName,
- shard)
+ log.warning(
+ "HandOffStopMessage[{}] is not handled by some of the entities of the `{}` shard, " +
+ "stopping the remaining entities.",
+ stopMessage.getClass.getName,
+ shard)
remaining.foreach { ref =>
context.stop(ref)
@@ -404,11 +410,12 @@ object ShardRegion {
}
}
- private[akka] def handOffStopperProps(shard: String,
- replyTo: ActorRef,
- entities: Set[ActorRef],
- stopMessage: Any,
- handoffTimeout: FiniteDuration): Props =
+ private[akka] def handOffStopperProps(
+ shard: String,
+ replyTo: ActorRef,
+ entities: Set[ActorRef],
+ stopMessage: Any,
+ handoffTimeout: FiniteDuration): Props =
Props(new HandOffStopper(shard, replyTo, entities, stopMessage, handoffTimeout)).withDeploy(Deploy.local)
}
@@ -421,16 +428,17 @@ object ShardRegion {
*
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
-private[akka] class ShardRegion(typeName: String,
- entityProps: Option[String => Props],
- dataCenter: Option[DataCenter],
- settings: ClusterShardingSettings,
- coordinatorPath: String,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- handOffStopMessage: Any,
- replicator: ActorRef,
- majorityMinCap: Int)
+private[akka] class ShardRegion(
+ typeName: String,
+ entityProps: Option[String => Props],
+ dataCenter: Option[DataCenter],
+ settings: ClusterShardingSettings,
+ coordinatorPath: String,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ handOffStopMessage: Any,
+ replicator: ActorRef,
+ majorityMinCap: Int)
extends Actor
with ActorLogging {
@@ -513,9 +521,10 @@ private[akka] class ShardRegion(typeName: String,
membersByAge = newMembers
if (before != after) {
if (log.isDebugEnabled)
- log.debug("Coordinator moved from [{}] to [{}]",
- before.map(_.address).getOrElse(""),
- after.map(_.address).getOrElse(""))
+ log.debug(
+ "Coordinator moved from [{}] to [{}]",
+ before.map(_.address).getOrElse(""),
+ after.map(_.address).getOrElse(""))
coordinator = None
register()
}
@@ -533,9 +542,10 @@ private[akka] class ShardRegion(typeName: String,
case msg: StartEntity => deliverStartEntity(msg, sender())
case msg if extractEntityId.isDefinedAt(msg) => deliverMessage(msg, sender())
case unknownMsg =>
- log.warning("Message does not have an extractor defined in shard [{}] so it was ignored: {}",
- typeName,
- unknownMsg)
+ log.warning(
+ "Message does not have an extractor defined in shard [{}] so it was ignored: {}",
+ typeName,
+ unknownMsg)
}
def receiveClusterState(state: CurrentClusterState): Unit = {
@@ -893,15 +903,16 @@ private[akka] class ShardRegion(typeName: String,
val shard = context.watch(
context.actorOf(
Shard
- .props(typeName,
- id,
- props,
- settings,
- extractEntityId,
- extractShardId,
- handOffStopMessage,
- replicator,
- majorityMinCap)
+ .props(
+ typeName,
+ id,
+ props,
+ settings,
+ extractEntityId,
+ extractShardId,
+ handOffStopMessage,
+ replicator,
+ majorityMinCap)
.withDispatcher(context.props.dispatcher),
name))
shardsByRef = shardsByRef.updated(shard, id)
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala
index a95c17c21a..0f98befacf 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala
@@ -72,8 +72,9 @@ object ClusterShardingCustomShardAllocationSpec {
(ref ? AllocateReq).mapTo[ActorRef]
}
- override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]],
- rebalanceInProgress: Set[ShardRegion.ShardId]): Future[Set[ShardRegion.ShardId]] = {
+ override def rebalance(
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]],
+ rebalanceInProgress: Set[ShardRegion.ShardId]): Future[Set[ShardRegion.ShardId]] = {
(ref ? RebalanceReq).mapTo[Set[String]]
}
}
@@ -139,13 +140,14 @@ abstract class ClusterShardingCustomShardAllocationSpec(config: ClusterShardingC
}
def startSharding(): Unit = {
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = Props[Entity],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- allocationStrategy = TestAllocationStrategy(allocator),
- handOffStopMessage = PoisonPill)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = Props[Entity],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ allocationStrategy = TestAllocationStrategy(allocator),
+ handOffStopMessage = PoisonPill)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala
index 55767ef649..8c2f8afded 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala
@@ -139,11 +139,12 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf
}
def startSharding(): Unit = {
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = Props[Entity],
- settings = ClusterShardingSettings(system).withRememberEntities(true),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = Props[Entity],
+ settings = ClusterShardingSettings(system).withRememberEntities(true),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala
index 9abe5ab4f3..1cead11bfe 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala
@@ -79,18 +79,20 @@ abstract class ClusterShardingGetStateSpec
def initialParticipants = roles.size
def startShard(): ActorRef = {
- ClusterSharding(system).start(typeName = shardTypeName,
- entityProps = Props(new ShardedActor),
- settings = ClusterShardingSettings(system).withRole("shard"),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = shardTypeName,
+ entityProps = Props(new ShardedActor),
+ settings = ClusterShardingSettings(system).withRole("shard"),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
def startProxy(): ActorRef = {
- ClusterSharding(system).startProxy(typeName = shardTypeName,
- role = Some("shard"),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).startProxy(
+ typeName = shardTypeName,
+ role = Some("shard"),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
def join(from: RoleName): Unit = {
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala
index 04ff39bbf9..7976886118 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala
@@ -82,18 +82,20 @@ abstract class ClusterShardingGetStatsSpec
def initialParticipants = roles.size
def startShard(): ActorRef = {
- ClusterSharding(system).start(typeName = shardTypeName,
- entityProps = Props(new ShardedActor),
- settings = ClusterShardingSettings(system).withRole("shard"),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = shardTypeName,
+ entityProps = Props(new ShardedActor),
+ settings = ClusterShardingSettings(system).withRole("shard"),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
def startProxy(): ActorRef = {
- ClusterSharding(system).startProxy(typeName = shardTypeName,
- role = Some("shard"),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).startProxy(
+ typeName = shardTypeName,
+ role = Some("shard"),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
def join(from: RoleName): Unit = {
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala
index 6634d4b7d3..46eea5f152 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala
@@ -115,13 +115,14 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef
def startSharding(): Unit = {
val allocationStrategy =
new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = Props[Entity],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- allocationStrategy,
- handOffStopMessage = StopEntity)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = Props[Entity],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ allocationStrategy,
+ handOffStopMessage = StopEntity)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
@@ -194,13 +195,14 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef
runOn(first) {
val allocationStrategy =
new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
- val regionEmpty = ClusterSharding(system).start(typeName = "EntityEmpty",
- entityProps = Props[Entity],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- allocationStrategy,
- handOffStopMessage = StopEntity)
+ val regionEmpty = ClusterSharding(system).start(
+ typeName = "EntityEmpty",
+ entityProps = Props[Entity],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ allocationStrategy,
+ handOffStopMessage = StopEntity)
watch(regionEmpty)
regionEmpty ! GracefulShutdown
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala
index 490bca8a2f..d422925543 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala
@@ -50,11 +50,12 @@ abstract class ClusterShardingIncorrectSetupSpec
enterBarrier("cluster-up")
runOn(first) {
EventFilter.error(pattern = """Has ClusterSharding been started on all nodes?""").intercept {
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = TestActors.echoActorProps,
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = TestActors.echoActorProps,
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
}
enterBarrier("helpful error message logged")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala
index 6928b914ba..da87679b99 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala
@@ -136,11 +136,12 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf
}
def startSharding(): Unit = {
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = Props[Entity],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = Props[Entity],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala
index 34adf56387..5b3e3e33ff 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala
@@ -112,13 +112,14 @@ abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSp
def startSharding(): Unit = {
val allocationStrategy =
new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = TestActors.echoActorProps,
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- allocationStrategy,
- handOffStopMessage = StopEntity)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = TestActors.echoActorProps,
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ allocationStrategy,
+ handOffStopMessage = StopEntity)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala
index 4d7c8cdec9..012c74af56 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala
@@ -160,21 +160,21 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(
val cluster = Cluster(system)
def startShardingWithExtractor1(): Unit = {
- ClusterSharding(system).start(typeName = typeName,
- entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(None),
- settings =
- ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId1)
+ ClusterSharding(system).start(
+ typeName = typeName,
+ entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(None),
+ settings = ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId1)
}
def startShardingWithExtractor2(sys: ActorSystem, probe: ActorRef): Unit = {
- ClusterSharding(sys).start(typeName = typeName,
- entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(Some(probe)),
- settings =
- ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId2)
+ ClusterSharding(sys).start(
+ typeName = typeName,
+ entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(Some(probe)),
+ settings = ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId2)
}
def region(sys: ActorSystem = system) = ClusterSharding(sys).shardRegion(typeName)
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala
index 2be28b7248..13358e0e47 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala
@@ -130,11 +130,12 @@ abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememb
val cluster = Cluster(system)
def startSharding(sys: ActorSystem = system, probe: ActorRef = testActor): Unit = {
- ClusterSharding(sys).start(typeName = "Entity",
- entityProps = ClusterShardingRememberEntitiesSpec.props(probe),
- settings = ClusterShardingSettings(system).withRememberEntities(true),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(sys).start(
+ typeName = "Entity",
+ entityProps = ClusterShardingRememberEntitiesSpec.props(probe),
+ settings = ClusterShardingSettings(system).withRememberEntities(true),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala
index 32f8818197..fe4f66f59d 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala
@@ -78,11 +78,12 @@ abstract class ClusterShardingSingleShardPerEntitySpec
}
def startSharding(): Unit = {
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = Props[Entity],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = Props[Entity],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
index d41145564c..2f38bf0cdd 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
@@ -300,27 +300,28 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig)
ShardCoordinator.props(typeName, settings, allocationStrategy, replicator, majorityMinCap)
}
- List("counter",
- "rebalancingCounter",
- "RememberCounterEntities",
- "AnotherRememberCounter",
- "RememberCounter",
- "RebalancingRememberCounter",
- "AutoMigrateRememberRegionTest").foreach { typeName =>
+ List(
+ "counter",
+ "rebalancingCounter",
+ "RememberCounterEntities",
+ "AnotherRememberCounter",
+ "RememberCounter",
+ "RebalancingRememberCounter",
+ "AutoMigrateRememberRegionTest").foreach { typeName =>
val rebalanceEnabled = typeName.toLowerCase.startsWith("rebalancing")
val rememberEnabled = typeName.toLowerCase.contains("remember")
val singletonProps = BackoffSupervisor
- .props(childProps = coordinatorProps(typeName, rebalanceEnabled, rememberEnabled),
- childName = "coordinator",
- minBackoff = 5.seconds,
- maxBackoff = 5.seconds,
- randomFactor = 0.1,
- maxNrOfRetries = -1)
+ .props(
+ childProps = coordinatorProps(typeName, rebalanceEnabled, rememberEnabled),
+ childName = "coordinator",
+ minBackoff = 5.seconds,
+ maxBackoff = 5.seconds,
+ randomFactor = 0.1,
+ maxNrOfRetries = -1)
.withDeploy(Deploy.local)
system.actorOf(
- ClusterSingletonManager.props(singletonProps,
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ ClusterSingletonManager
+ .props(singletonProps, terminationMessage = PoisonPill, settings = ClusterSingletonManagerSettings(system)),
name = typeName + "Coordinator")
}
}
@@ -334,15 +335,16 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig)
""").withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities)
system.actorOf(
- ShardRegion.props(typeName = typeName,
- entityProps = _ => qualifiedCounterProps(typeName),
- settings = settings,
- coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator",
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- handOffStopMessage = PoisonPill,
- replicator,
- majorityMinCap = 3),
+ ShardRegion.props(
+ typeName = typeName,
+ entityProps = _ => qualifiedCounterProps(typeName),
+ settings = settings,
+ coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator",
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ handOffStopMessage = PoisonPill,
+ replicator,
+ majorityMinCap = 3),
name = typeName + "Region")
}
@@ -463,16 +465,17 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig)
buffer-size = 1000
""").withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
val settings = ClusterShardingSettings(cfg)
- val proxy = system.actorOf(ShardRegion.proxyProps(typeName = "counter",
- dataCenter = None,
- settings,
- coordinatorPath =
- "/user/counterCoordinator/singleton/coordinator",
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- system.deadLetters,
- majorityMinCap = 0),
- name = "regionProxy")
+ val proxy = system.actorOf(
+ ShardRegion.proxyProps(
+ typeName = "counter",
+ dataCenter = None,
+ settings,
+ coordinatorPath = "/user/counterCoordinator/singleton/coordinator",
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ system.deadLetters,
+ majorityMinCap = 0),
+ name = "regionProxy")
proxy ! Get(1)
expectMsg(2)
@@ -632,24 +635,27 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig)
"easy to use with extensions" in within(50.seconds) {
runOn(third, fourth, fifth, sixth) {
//#counter-start
- val counterRegion: ActorRef = ClusterSharding(system).start(typeName = "Counter",
- entityProps = Props[Counter],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ val counterRegion: ActorRef = ClusterSharding(system).start(
+ typeName = "Counter",
+ entityProps = Props[Counter],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
//#counter-start
- ClusterSharding(system).start(typeName = "AnotherCounter",
- entityProps = Props[AnotherCounter],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = "AnotherCounter",
+ entityProps = Props[AnotherCounter],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
//#counter-supervisor-start
- ClusterSharding(system).start(typeName = "SupervisedCounter",
- entityProps = Props[CounterSupervisor],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = "SupervisedCounter",
+ entityProps = Props[CounterSupervisor],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
//#counter-supervisor-start
}
enterBarrier("extension-started")
@@ -686,11 +692,12 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig)
}
"easy API for starting" in within(50.seconds) {
runOn(first) {
- val counterRegionViaStart: ActorRef = ClusterSharding(system).start(typeName = "ApiTest",
- entityProps = Props[Counter],
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ val counterRegionViaStart: ActorRef = ClusterSharding(system).start(
+ typeName = "ApiTest",
+ entityProps = Props[Counter],
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
val counterRegionViaGet: ActorRef = ClusterSharding(system).shardRegion("ApiTest")
@@ -703,11 +710,12 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig)
"demonstrate API for DC proxy" in within(50.seconds) {
runOn(sixth) {
// #proxy-dc
- val counterProxyDcB: ActorRef = ClusterSharding(system).startProxy(typeName = "Counter",
- role = None,
- dataCenter = Some("B"),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ val counterProxyDcB: ActorRef = ClusterSharding(system).startProxy(
+ typeName = "Counter",
+ role = None,
+ dataCenter = Some("B"),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
// #proxy-dc
}
enterBarrier("after-dc-proxy")
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala
index 56867aa794..21e242f986 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala
@@ -105,11 +105,12 @@ abstract class MultiDcClusterShardingSpec
}
def startSharding(): Unit = {
- ClusterSharding(system).start(typeName = "Entity",
- entityProps = Props[Entity](),
- settings = ClusterShardingSettings(system),
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ ClusterSharding(system).start(
+ typeName = "Entity",
+ entityProps = Props[Entity](),
+ settings = ClusterShardingSettings(system),
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
}
lazy val region = ClusterSharding(system).shardRegion("Entity")
@@ -193,11 +194,12 @@ abstract class MultiDcClusterShardingSpec
"allow proxy within same data center" in {
runOn(second) {
- val proxy = ClusterSharding(system).startProxy(typeName = "Entity",
- role = None,
- dataCenter = None, // by default use own DC
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ val proxy = ClusterSharding(system).startProxy(
+ typeName = "Entity",
+ role = None,
+ dataCenter = None, // by default use own DC
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
proxy ! GetCount("5")
expectMsg(1)
}
@@ -206,11 +208,12 @@ abstract class MultiDcClusterShardingSpec
"allow proxy across different data centers" in {
runOn(second) {
- val proxy = ClusterSharding(system).startProxy(typeName = "Entity",
- role = None,
- dataCenter = Some("DC2"), // proxy to other DC
- extractEntityId = extractEntityId,
- extractShardId = extractShardId)
+ val proxy = ClusterSharding(system).startProxy(
+ typeName = "Entity",
+ role = None,
+ dataCenter = Some("DC2"), // proxy to other DC
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId)
proxy ! GetCount("5")
expectMsg(2)
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala
index 86a92fa7e8..b907db6d51 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala
@@ -45,19 +45,21 @@ class ClusterShardingInternalsSpec extends AkkaSpec("""
val extractEntityId = mock[ShardRegion.ExtractEntityId]
val extractShardId = mock[ShardRegion.ExtractShardId]
- clusterSharding.start(typeName = typeName,
- entityProps = Props.empty,
- settings = settingsWithRole,
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- allocationStrategy = mock[ShardAllocationStrategy],
- handOffStopMessage = PoisonPill)
+ clusterSharding.start(
+ typeName = typeName,
+ entityProps = Props.empty,
+ settings = settingsWithRole,
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ allocationStrategy = mock[ShardAllocationStrategy],
+ handOffStopMessage = PoisonPill)
- verify(clusterSharding).startProxy(ArgumentMatchers.eq(typeName),
- ArgumentMatchers.eq(settingsWithRole.role),
- ArgumentMatchers.eq(None),
- ArgumentMatchers.eq(extractEntityId),
- ArgumentMatchers.eq(extractShardId))
+ verify(clusterSharding).startProxy(
+ ArgumentMatchers.eq(typeName),
+ ArgumentMatchers.eq(settingsWithRole.role),
+ ArgumentMatchers.eq(None),
+ ArgumentMatchers.eq(extractEntityId),
+ ArgumentMatchers.eq(extractShardId))
}
"HandOffStopper must stop the entity even if the entity doesn't handle handOffStopMessage" in {
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala
index de7a52735e..c0b1eea828 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala
@@ -43,21 +43,24 @@ class CoordinatedShutdownShardingSpec extends AkkaSpec(CoordinatedShutdownShardi
val sys2 = ActorSystem(system.name, system.settings.config)
val sys3 = system
- val region1 = ClusterSharding(sys1).start("type1",
- Props[EchoActor](),
- ClusterShardingSettings(sys1),
- extractEntityId,
- extractShardId)
- val region2 = ClusterSharding(sys2).start("type1",
- Props[EchoActor](),
- ClusterShardingSettings(sys2),
- extractEntityId,
- extractShardId)
- val region3 = ClusterSharding(sys3).start("type1",
- Props[EchoActor](),
- ClusterShardingSettings(sys3),
- extractEntityId,
- extractShardId)
+ val region1 = ClusterSharding(sys1).start(
+ "type1",
+ Props[EchoActor](),
+ ClusterShardingSettings(sys1),
+ extractEntityId,
+ extractShardId)
+ val region2 = ClusterSharding(sys2).start(
+ "type1",
+ Props[EchoActor](),
+ ClusterShardingSettings(sys2),
+ extractEntityId,
+ extractShardId)
+ val region3 = ClusterSharding(sys3).start(
+ "type1",
+ Props[EchoActor](),
+ ClusterShardingSettings(sys3),
+ extractEntityId,
+ extractShardId)
val probe1 = TestProbe()(sys1)
val probe2 = TestProbe()(sys2)
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala
index daeda42b9b..9ed95cdbf1 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala
@@ -60,13 +60,14 @@ class InactiveEntityPassivationSpec extends AkkaSpec(InactiveEntityPassivationSp
Cluster(system).join(Cluster(system).selfAddress)
val probe = TestProbe()
val settings = ClusterShardingSettings(system)
- val region = ClusterSharding(system).start("myType",
- InactiveEntityPassivationSpec.Entity.props(probe.ref),
- settings,
- extractEntityId,
- extractShardId,
- ClusterSharding(system).defaultShardAllocationStrategy(settings),
- Passivate)
+ val region = ClusterSharding(system).start(
+ "myType",
+ InactiveEntityPassivationSpec.Entity.props(probe.ref),
+ settings,
+ extractEntityId,
+ extractShardId,
+ ClusterSharding(system).defaultShardAllocationStrategy(settings),
+ Passivate)
region ! 1
region ! 2
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala
index 721d5023e1..93a83e6b9e 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala
@@ -17,9 +17,10 @@ class LeastShardAllocationStrategySpec extends AkkaSpec {
def createAllocations(aCount: Int, bCount: Int = 0, cCount: Int = 0): Map[ActorRef, Vector[String]] = {
val shards = (1 to (aCount + bCount + cCount)).map(n => ("00" + n.toString).takeRight(3))
- Map(regionA -> shards.take(aCount).toVector,
- regionB -> shards.slice(aCount, aCount + bCount).toVector,
- regionC -> shards.takeRight(cCount).toVector)
+ Map(
+ regionA -> shards.take(aCount).toVector,
+ regionB -> shards.slice(aCount, aCount + bCount).toVector,
+ regionC -> shards.takeRight(cCount).toVector)
}
"LeastShardAllocationStrategy" must {
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala
index 157667facc..75c0ae4504 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala
@@ -41,10 +41,11 @@ class ProxyShardingSpec extends AkkaSpec(ProxyShardingSpec.config) {
clusterSharding.startProxy("myType", Some(role), idExtractor, shardResolver)
"Proxy should be found" in {
- val proxyActor: ActorRef = Await.result(system
- .actorSelection("akka://ProxyShardingSpec/system/sharding/myTypeProxy")
- .resolveOne(FiniteDuration(5, SECONDS)),
- 3.seconds)
+ val proxyActor: ActorRef = Await.result(
+ system
+ .actorSelection("akka://ProxyShardingSpec/system/sharding/myTypeProxy")
+ .resolveOne(FiniteDuration(5, SECONDS)),
+ 3.seconds)
proxyActor.path should not be null
proxyActor.path.toString should endWith("Proxy")
@@ -60,10 +61,11 @@ class ProxyShardingSpec extends AkkaSpec(ProxyShardingSpec.config) {
"Shard coordinator should be found" in {
val shardCoordinator: ActorRef =
- Await.result(system
- .actorSelection("akka://ProxyShardingSpec/system/sharding/myTypeCoordinator")
- .resolveOne(FiniteDuration(5, SECONDS)),
- 3.seconds)
+ Await.result(
+ system
+ .actorSelection("akka://ProxyShardingSpec/system/sharding/myTypeCoordinator")
+ .resolveOne(FiniteDuration(5, SECONDS)),
+ 3.seconds)
shardCoordinator.path should not be null
shardCoordinator.path.toString should endWith("Coordinator")
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala
index 3e816989b8..e39853d2a1 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala
@@ -201,11 +201,12 @@ class RemoveInternalClusterShardingDataSpec
hasEvents(typeName) should ===(true)
}
- val result = RemoveInternalClusterShardingData.remove(system,
- journalPluginId = "",
- typeNames.toSet,
- terminateSystem = false,
- remove2dot3Data = true)
+ val result = RemoveInternalClusterShardingData.remove(
+ system,
+ journalPluginId = "",
+ typeNames.toSet,
+ terminateSystem = false,
+ remove2dot3Data = true)
Await.ready(result, remaining)
typeNames.foreach { typeName =>
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala
index ce55889c4c..437f48ec52 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala
@@ -69,20 +69,22 @@ class SupervisionSpec extends AkkaSpec(SupervisionSpec.config) with ImplicitSend
val supervisedProps = BackoffSupervisor.props(
Backoff
- .onStop(Props(new PassivatingActor()),
- childName = "child",
- minBackoff = 1.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2,
- maxNrOfRetries = -1)
+ .onStop(
+ Props(new PassivatingActor()),
+ childName = "child",
+ minBackoff = 1.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2,
+ maxNrOfRetries = -1)
.withFinalStopMessage(_ == StopMessage))
Cluster(system).join(Cluster(system).selfAddress)
- val region = ClusterSharding(system).start("passy",
- supervisedProps,
- ClusterShardingSettings(system),
- idExtractor,
- shardResolver)
+ val region = ClusterSharding(system).start(
+ "passy",
+ supervisedProps,
+ ClusterShardingSettings(system),
+ idExtractor,
+ shardResolver)
region ! Msg(10, "hello")
val response = expectMsgType[Response](5.seconds)
@@ -103,19 +105,21 @@ class SupervisionSpec extends AkkaSpec(SupervisionSpec.config) with ImplicitSend
val supervisedProps = BackoffSupervisor.props(
BackoffOpts
- .onStop(Props(new PassivatingActor()),
- childName = "child",
- minBackoff = 1.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2)
+ .onStop(
+ Props(new PassivatingActor()),
+ childName = "child",
+ minBackoff = 1.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2)
.withFinalStopMessage(_ == StopMessage))
Cluster(system).join(Cluster(system).selfAddress)
- val region = ClusterSharding(system).start("passy",
- supervisedProps,
- ClusterShardingSettings(system),
- idExtractor,
- shardResolver)
+ val region = ClusterSharding(system).start(
+ "passy",
+ supervisedProps,
+ ClusterShardingSettings(system),
+ idExtractor,
+ shardResolver)
region ! Msg(10, "hello")
val response = expectMsgType[Response](5.seconds)
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala
index 2140ecc618..4ef42ca059 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala
@@ -30,11 +30,11 @@ class ClusterShardingMessageSerializerSpec extends AkkaSpec {
"ClusterShardingMessageSerializer" must {
"be able to serialize ShardCoordinator snapshot State" in {
- val state = State(shards = Map("a" -> region1, "b" -> region2, "c" -> region2),
- regions =
- Map(region1 -> Vector("a"), region2 -> Vector("b", "c"), region3 -> Vector.empty[String]),
- regionProxies = Set(regionProxy1, regionProxy2),
- unallocatedShards = Set("d"))
+ val state = State(
+ shards = Map("a" -> region1, "b" -> region2, "c" -> region2),
+ regions = Map(region1 -> Vector("a"), region2 -> Vector("b", "c"), region3 -> Vector.empty[String]),
+ regionProxies = Set(regionProxy1, regionProxy2),
+ unallocatedShards = Set("d"))
checkSerialization(state)
}
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
index aacfc0d882..d560e1199f 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
@@ -109,13 +109,14 @@ object ClusterClientSettings {
* to watch it from another actor and possibly acquire a new list of initialContacts from some
* external service registry
*/
-final class ClusterClientSettings(val initialContacts: Set[ActorPath],
- val establishingGetContactsInterval: FiniteDuration,
- val refreshContactsInterval: FiniteDuration,
- val heartbeatInterval: FiniteDuration,
- val acceptableHeartbeatPause: FiniteDuration,
- val bufferSize: Int,
- val reconnectTimeout: Option[FiniteDuration])
+final class ClusterClientSettings(
+ val initialContacts: Set[ActorPath],
+ val establishingGetContactsInterval: FiniteDuration,
+ val refreshContactsInterval: FiniteDuration,
+ val heartbeatInterval: FiniteDuration,
+ val acceptableHeartbeatPause: FiniteDuration,
+ val bufferSize: Int,
+ val reconnectTimeout: Option[FiniteDuration])
extends NoSerializationVerificationNeeded {
require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000")
@@ -123,19 +124,21 @@ final class ClusterClientSettings(val initialContacts: Set[ActorPath],
/**
* For binary/source compatibility
*/
- def this(initialContacts: Set[ActorPath],
- establishingGetContactsInterval: FiniteDuration,
- refreshContactsInterval: FiniteDuration,
- heartbeatInterval: FiniteDuration,
- acceptableHeartbeatPause: FiniteDuration,
- bufferSize: Int) =
- this(initialContacts,
- establishingGetContactsInterval,
- refreshContactsInterval,
- heartbeatInterval,
- acceptableHeartbeatPause,
- bufferSize,
- None)
+ def this(
+ initialContacts: Set[ActorPath],
+ establishingGetContactsInterval: FiniteDuration,
+ refreshContactsInterval: FiniteDuration,
+ heartbeatInterval: FiniteDuration,
+ acceptableHeartbeatPause: FiniteDuration,
+ bufferSize: Int) =
+ this(
+ initialContacts,
+ establishingGetContactsInterval,
+ refreshContactsInterval,
+ heartbeatInterval,
+ acceptableHeartbeatPause,
+ bufferSize,
+ None)
/**
* Scala API
@@ -159,8 +162,9 @@ final class ClusterClientSettings(val initialContacts: Set[ActorPath],
def withRefreshContactsInterval(refreshContactsInterval: FiniteDuration): ClusterClientSettings =
copy(refreshContactsInterval = refreshContactsInterval)
- def withHeartbeat(heartbeatInterval: FiniteDuration,
- acceptableHeartbeatPause: FiniteDuration): ClusterClientSettings =
+ def withHeartbeat(
+ heartbeatInterval: FiniteDuration,
+ acceptableHeartbeatPause: FiniteDuration): ClusterClientSettings =
copy(heartbeatInterval = heartbeatInterval, acceptableHeartbeatPause = acceptableHeartbeatPause)
def withBufferSize(bufferSize: Int): ClusterClientSettings =
@@ -169,20 +173,22 @@ final class ClusterClientSettings(val initialContacts: Set[ActorPath],
def withReconnectTimeout(reconnectTimeout: Option[FiniteDuration]): ClusterClientSettings =
copy(reconnectTimeout = reconnectTimeout)
- private def copy(initialContacts: Set[ActorPath] = initialContacts,
- establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval,
- refreshContactsInterval: FiniteDuration = refreshContactsInterval,
- heartbeatInterval: FiniteDuration = heartbeatInterval,
- acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
- bufferSize: Int = bufferSize,
- reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings =
- new ClusterClientSettings(initialContacts,
- establishingGetContactsInterval,
- refreshContactsInterval,
- heartbeatInterval,
- acceptableHeartbeatPause,
- bufferSize,
- reconnectTimeout)
+ private def copy(
+ initialContacts: Set[ActorPath] = initialContacts,
+ establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval,
+ refreshContactsInterval: FiniteDuration = refreshContactsInterval,
+ heartbeatInterval: FiniteDuration = heartbeatInterval,
+ acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
+ bufferSize: Int = bufferSize,
+ reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings =
+ new ClusterClientSettings(
+ initialContacts,
+ establishingGetContactsInterval,
+ refreshContactsInterval,
+ heartbeatInterval,
+ acceptableHeartbeatPause,
+ bufferSize,
+ reconnectTimeout)
}
/**
@@ -413,8 +419,9 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac
case Publish(topic, msg) =>
buffer(DistributedPubSubMediator.Publish(topic, msg))
case ReconnectTimeout =>
- log.warning("Receptionist reconnect not successful within {} stopping cluster client",
- settings.reconnectTimeout)
+ log.warning(
+ "Receptionist reconnect not successful within {} stopping cluster client",
+ settings.reconnectTimeout)
context.stop(self)
case ReceptionistShutdown => // ok, haven't chosen a receptionist yet
}
@@ -657,9 +664,10 @@ object ClusterReceptionistSettings {
* @param responseTunnelReceiveTimeout The actor that tunnel response messages to the
* client will be stopped after this time of inactivity.
*/
-final class ClusterReceptionistSettings(val role: Option[String],
- val numberOfContacts: Int,
- val responseTunnelReceiveTimeout: FiniteDuration)
+final class ClusterReceptionistSettings(
+ val role: Option[String],
+ val numberOfContacts: Int,
+ val responseTunnelReceiveTimeout: FiniteDuration)
extends NoSerializationVerificationNeeded {
def withRole(role: String): ClusterReceptionistSettings = copy(role = ClusterReceptionistSettings.roleOption(role))
@@ -672,12 +680,14 @@ final class ClusterReceptionistSettings(val role: Option[String],
def withResponseTunnelReceiveTimeout(responseTunnelReceiveTimeout: FiniteDuration): ClusterReceptionistSettings =
copy(responseTunnelReceiveTimeout = responseTunnelReceiveTimeout)
- def withHeartbeat(heartbeatInterval: FiniteDuration,
- acceptableHeartbeatPause: FiniteDuration,
- failureDetectionInterval: FiniteDuration): ClusterReceptionistSettings =
- copy(heartbeatInterval = heartbeatInterval,
- acceptableHeartbeatPause = acceptableHeartbeatPause,
- failureDetectionInterval = failureDetectionInterval)
+ def withHeartbeat(
+ heartbeatInterval: FiniteDuration,
+ acceptableHeartbeatPause: FiniteDuration,
+ failureDetectionInterval: FiniteDuration): ClusterReceptionistSettings =
+ copy(
+ heartbeatInterval = heartbeatInterval,
+ acceptableHeartbeatPause = acceptableHeartbeatPause,
+ failureDetectionInterval = failureDetectionInterval)
// BEGIN BINARY COMPATIBILITY
// The following is required in order to maintain binary
@@ -697,12 +707,13 @@ final class ClusterReceptionistSettings(val role: Option[String],
private var _acceptableHeartbeatPause: FiniteDuration = 13.seconds
private var _failureDetectionInterval: FiniteDuration = 2.second
- def this(role: Option[String],
- numberOfContacts: Int,
- responseTunnelReceiveTimeout: FiniteDuration,
- heartbeatInterval: FiniteDuration,
- acceptableHeartbeatPause: FiniteDuration,
- failureDetectionInterval: FiniteDuration) = {
+ def this(
+ role: Option[String],
+ numberOfContacts: Int,
+ responseTunnelReceiveTimeout: FiniteDuration,
+ heartbeatInterval: FiniteDuration,
+ acceptableHeartbeatPause: FiniteDuration,
+ failureDetectionInterval: FiniteDuration) = {
this(role, numberOfContacts, responseTunnelReceiveTimeout)
this._heartbeatInterval = heartbeatInterval
this._acceptableHeartbeatPause = acceptableHeartbeatPause
@@ -711,18 +722,20 @@ final class ClusterReceptionistSettings(val role: Option[String],
// END BINARY COMPATIBILITY
- private def copy(role: Option[String] = role,
- numberOfContacts: Int = numberOfContacts,
- responseTunnelReceiveTimeout: FiniteDuration = responseTunnelReceiveTimeout,
- heartbeatInterval: FiniteDuration = heartbeatInterval,
- acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
- failureDetectionInterval: FiniteDuration = failureDetectionInterval): ClusterReceptionistSettings =
- new ClusterReceptionistSettings(role,
- numberOfContacts,
- responseTunnelReceiveTimeout,
- heartbeatInterval,
- acceptableHeartbeatPause,
- failureDetectionInterval)
+ private def copy(
+ role: Option[String] = role,
+ numberOfContacts: Int = numberOfContacts,
+ responseTunnelReceiveTimeout: FiniteDuration = responseTunnelReceiveTimeout,
+ heartbeatInterval: FiniteDuration = heartbeatInterval,
+ acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
+ failureDetectionInterval: FiniteDuration = failureDetectionInterval): ClusterReceptionistSettings =
+ new ClusterReceptionistSettings(
+ role,
+ numberOfContacts,
+ responseTunnelReceiveTimeout,
+ heartbeatInterval,
+ acceptableHeartbeatPause,
+ failureDetectionInterval)
}
/**
@@ -968,9 +981,10 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep
if (numberOfContacts >= nodes.size) {
val contacts = Contacts(nodes.iterator.map(a => self.path.toStringWithAddress(a)).to(immutable.IndexedSeq))
if (log.isDebugEnabled)
- log.debug("Client [{}] gets contactPoints [{}] (all nodes)",
- sender().path,
- contacts.contactPoints.mkString(","))
+ log.debug(
+ "Client [{}] gets contactPoints [{}] (all nodes)",
+ sender().path,
+ contacts.contactPoints.mkString(","))
sender() ! contacts
} else {
// using toStringWithAddress in case the client is local, normally it is not, and
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
index a58e10b881..3eeb36dc90 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
@@ -90,30 +90,34 @@ object DistributedPubSubSettings {
* the registries. Next chunk will be transferred in next round of gossip.
* @param sendToDeadLettersWhenNoSubscribers When a message is published to a topic with no subscribers send it to the dead letters.
*/
-final class DistributedPubSubSettings(val role: Option[String],
- val routingLogic: RoutingLogic,
- val gossipInterval: FiniteDuration,
- val removedTimeToLive: FiniteDuration,
- val maxDeltaElements: Int,
- val sendToDeadLettersWhenNoSubscribers: Boolean)
+final class DistributedPubSubSettings(
+ val role: Option[String],
+ val routingLogic: RoutingLogic,
+ val gossipInterval: FiniteDuration,
+ val removedTimeToLive: FiniteDuration,
+ val maxDeltaElements: Int,
+ val sendToDeadLettersWhenNoSubscribers: Boolean)
extends NoSerializationVerificationNeeded {
@deprecated("Use the other constructor instead.", "2.5.5")
- def this(role: Option[String],
- routingLogic: RoutingLogic,
- gossipInterval: FiniteDuration,
- removedTimeToLive: FiniteDuration,
- maxDeltaElements: Int) {
- this(role,
- routingLogic,
- gossipInterval,
- removedTimeToLive,
- maxDeltaElements,
- sendToDeadLettersWhenNoSubscribers = true)
+ def this(
+ role: Option[String],
+ routingLogic: RoutingLogic,
+ gossipInterval: FiniteDuration,
+ removedTimeToLive: FiniteDuration,
+ maxDeltaElements: Int) {
+ this(
+ role,
+ routingLogic,
+ gossipInterval,
+ removedTimeToLive,
+ maxDeltaElements,
+ sendToDeadLettersWhenNoSubscribers = true)
}
- require(!routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
- "'ConsistentHashingRoutingLogic' can't be used by the pub-sub mediator")
+ require(
+ !routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
+ "'ConsistentHashingRoutingLogic' can't be used by the pub-sub mediator")
def withRole(role: String): DistributedPubSubSettings = copy(role = DistributedPubSubSettings.roleOption(role))
@@ -141,12 +145,13 @@ final class DistributedPubSubSettings(val role: Option[String],
removedTimeToLive: FiniteDuration = removedTimeToLive,
maxDeltaElements: Int = maxDeltaElements,
sendToDeadLettersWhenNoSubscribers: Boolean = sendToDeadLettersWhenNoSubscribers): DistributedPubSubSettings =
- new DistributedPubSubSettings(role,
- routingLogic,
- gossipInterval,
- removedTimeToLive,
- maxDeltaElements,
- sendToDeadLettersWhenNoSubscribers)
+ new DistributedPubSubSettings(
+ role,
+ routingLogic,
+ gossipInterval,
+ removedTimeToLive,
+ maxDeltaElements,
+ sendToDeadLettersWhenNoSubscribers)
}
object DistributedPubSubMediator {
@@ -531,14 +536,16 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings)
import DistributedPubSubMediator.Internal._
import settings._
- require(!routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
- "'consistent-hashing' routing logic can't be used by the pub-sub mediator")
+ require(
+ !routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
+ "'consistent-hashing' routing logic can't be used by the pub-sub mediator")
val cluster = Cluster(context.system)
import cluster.selfAddress
- require(role.forall(cluster.selfRoles.contains),
- s"This cluster member [${selfAddress}] doesn't have the role [$role]")
+ require(
+ role.forall(cluster.selfRoles.contains),
+ s"This cluster member [${selfAddress}] doesn't have the role [$role]")
val removedTimeToLiveMillis = removedTimeToLive.toMillis
@@ -804,8 +811,9 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings)
def put(key: String, valueOption: Option[ActorRef]): Unit = {
val bucket = registry(selfAddress)
val v = nextVersion()
- registry += (selfAddress -> bucket.copy(version = v,
- content = bucket.content + (key -> ValueHolder(v, valueOption))))
+ registry += (selfAddress -> bucket.copy(
+ version = v,
+ content = bucket.content + (key -> ValueHolder(v, valueOption))))
}
def getCurrentTopics(): Set[String] = {
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala
index 5ec241d285..a998624d1a 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala
@@ -126,8 +126,9 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor
private def statusFromProto(status: dm.Status): Status = {
val isReplyToStatus = if (status.hasReplyToStatus) status.getReplyToStatus else false
- Status(status.getVersionsList.asScala.iterator.map(v => addressFromProto(v.getAddress) -> v.getTimestamp).toMap,
- isReplyToStatus)
+ Status(
+ status.getVersionsList.asScala.iterator.map(v => addressFromProto(v.getAddress) -> v.getTimestamp).toMap,
+ isReplyToStatus)
}
private def deltaToProto(delta: Delta): dm.Delta = {
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
index acb96abba6..17eaf2ab05 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
@@ -53,11 +53,11 @@ object ClusterSingletonManagerSettings {
* the default configuration `akka.cluster.singleton`.
*/
def apply(config: Config): ClusterSingletonManagerSettings =
- new ClusterSingletonManagerSettings(singletonName = config.getString("singleton-name"),
- role = roleOption(config.getString("role")),
- removalMargin = Duration.Zero, // defaults to ClusterSettins.DownRemovalMargin
- handOverRetryInterval =
- config.getDuration("hand-over-retry-interval", MILLISECONDS).millis)
+ new ClusterSingletonManagerSettings(
+ singletonName = config.getString("singleton-name"),
+ role = roleOption(config.getString("role")),
+ removalMargin = Duration.Zero, // defaults to ClusterSettings.DownRemovalMargin
+ handOverRetryInterval = config.getDuration("hand-over-retry-interval", MILLISECONDS).millis)
/**
* Java API: Create settings from the default configuration
@@ -99,10 +99,11 @@ object ClusterSingletonManagerSettings {
* over has started or the previous oldest member is removed from the cluster
* (+ `removalMargin`).
*/
-final class ClusterSingletonManagerSettings(val singletonName: String,
- val role: Option[String],
- val removalMargin: FiniteDuration,
- val handOverRetryInterval: FiniteDuration)
+final class ClusterSingletonManagerSettings(
+ val singletonName: String,
+ val role: Option[String],
+ val removalMargin: FiniteDuration,
+ val handOverRetryInterval: FiniteDuration)
extends NoSerializationVerificationNeeded {
def withSingletonName(name: String): ClusterSingletonManagerSettings = copy(singletonName = name)
@@ -118,10 +119,11 @@ final class ClusterSingletonManagerSettings(val singletonName: String,
def withHandOverRetryInterval(retryInterval: FiniteDuration): ClusterSingletonManagerSettings =
copy(handOverRetryInterval = retryInterval)
- private def copy(singletonName: String = singletonName,
- role: Option[String] = role,
- removalMargin: FiniteDuration = removalMargin,
- handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings =
+ private def copy(
+ singletonName: String = singletonName,
+ role: Option[String] = role,
+ removalMargin: FiniteDuration = removalMargin,
+ handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings =
new ClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval)
}
@@ -204,9 +206,10 @@ object ClusterSingletonManager {
final case class YoungerData(oldestOption: Option[UniqueAddress]) extends Data
final case class BecomingOldestData(previousOldestOption: Option[UniqueAddress]) extends Data
final case class OldestData(singleton: ActorRef, singletonTerminated: Boolean = false) extends Data
- final case class WasOldestData(singleton: ActorRef,
- singletonTerminated: Boolean,
- newOldestOption: Option[UniqueAddress])
+ final case class WasOldestData(
+ singleton: ActorRef,
+ singletonTerminated: Boolean,
+ newOldestOption: Option[UniqueAddress])
extends Data
final case class HandingOverData(singleton: ActorRef, handOverTo: Option[ActorRef]) extends Data
final case class StoppingData(singleton: ActorRef) extends Data
@@ -450,8 +453,9 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
val selfUniqueAddressOption = Some(cluster.selfUniqueAddress)
import cluster.settings.LogInfo
- require(role.forall(cluster.selfRoles.contains),
- s"This cluster member [${cluster.selfAddress}] doesn't have the role [$role]")
+ require(
+ role.forall(cluster.selfRoles.contains),
+ s"This cluster member [${cluster.selfAddress}] doesn't have the role [$role]")
val removalMargin =
if (settings.removalMargin <= Duration.Zero) cluster.downingProvider.downRemovalMargin
@@ -573,9 +577,10 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
goto(BecomingOldest).using(BecomingOldestData(previousOldestOption))
}
} else {
- logInfo("Younger observed OldestChanged: [{} -> {}]",
- previousOldestOption.map(_.address),
- oldestOption.map(_.address))
+ logInfo(
+ "Younger observed OldestChanged: [{} -> {}]",
+ previousOldestOption.map(_.address),
+ oldestOption.map(_.address))
getNextOldestChanged()
stay.using(YoungerData(oldestOption))
}
@@ -617,9 +622,10 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
if (sender().path.address == previousOldest.address)
gotoOldest()
else {
- logInfo("Ignoring HandOverDone in BecomingOldest from [{}]. Expected previous oldest [{}]",
- sender().path.address,
- previousOldest.address)
+ logInfo(
+ "Ignoring HandOverDone in BecomingOldest from [{}]. Expected previous oldest [{}]",
+ sender().path.address,
+ previousOldest.address)
stay
}
@@ -655,9 +661,10 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
case Some(previousOldest) =>
if (previousOldest == senderUniqueAddress) sender() ! HandOverToMe
else
- logInfo("Ignoring TakeOver request in BecomingOldest from [{}]. Expected previous oldest [{}]",
- sender().path.address,
- previousOldest.address)
+ logInfo(
+ "Ignoring TakeOver request in BecomingOldest from [{}]. Expected previous oldest [{}]",
+ sender().path.address,
+ previousOldest.address)
stay
case None =>
sender() ! HandOverToMe
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
index 58e1e3b394..de314eafb1 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
@@ -36,11 +36,11 @@ object ClusterSingletonProxySettings {
* the default configuration `akka.cluster.singleton-proxy`.
*/
def apply(config: Config): ClusterSingletonProxySettings =
- new ClusterSingletonProxySettings(singletonName = config.getString("singleton-name"),
- role = roleOption(config.getString("role")),
- singletonIdentificationInterval =
- config.getDuration("singleton-identification-interval", MILLISECONDS).millis,
- bufferSize = config.getInt("buffer-size"))
+ new ClusterSingletonProxySettings(
+ singletonName = config.getString("singleton-name"),
+ role = roleOption(config.getString("role")),
+ singletonIdentificationInterval = config.getDuration("singleton-identification-interval", MILLISECONDS).millis,
+ bufferSize = config.getInt("buffer-size"))
/**
* Java API: Create settings from the default configuration
@@ -72,18 +72,20 @@ object ClusterSingletonProxySettings {
* when new messages are sent viea the proxy. Use 0 to disable buffering, i.e. messages will be dropped
* immediately if the location of the singleton is unknown.
*/
-final class ClusterSingletonProxySettings(val singletonName: String,
- val role: Option[String],
- val dataCenter: Option[DataCenter],
- val singletonIdentificationInterval: FiniteDuration,
- val bufferSize: Int)
+final class ClusterSingletonProxySettings(
+ val singletonName: String,
+ val role: Option[String],
+ val dataCenter: Option[DataCenter],
+ val singletonIdentificationInterval: FiniteDuration,
+ val bufferSize: Int)
extends NoSerializationVerificationNeeded {
// for backwards compatibility
- def this(singletonName: String,
- role: Option[String],
- singletonIdentificationInterval: FiniteDuration,
- bufferSize: Int) =
+ def this(
+ singletonName: String,
+ role: Option[String],
+ singletonIdentificationInterval: FiniteDuration,
+ bufferSize: Int) =
this(singletonName, role, None, singletonIdentificationInterval, bufferSize)
require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000")
@@ -106,11 +108,12 @@ final class ClusterSingletonProxySettings(val singletonName: String,
def withBufferSize(bufferSize: Int): ClusterSingletonProxySettings =
copy(bufferSize = bufferSize)
- private def copy(singletonName: String = singletonName,
- role: Option[String] = role,
- dataCenter: Option[DataCenter] = dataCenter,
- singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval,
- bufferSize: Int = bufferSize): ClusterSingletonProxySettings =
+ private def copy(
+ singletonName: String = singletonName,
+ role: Option[String] = role,
+ dataCenter: Option[DataCenter] = dataCenter,
+ singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval,
+ bufferSize: Int = bufferSize): ClusterSingletonProxySettings =
new ClusterSingletonProxySettings(singletonName, role, dataCenter, singletonIdentificationInterval, bufferSize)
}
@@ -291,9 +294,10 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste
singleton match {
case Some(s) =>
if (log.isDebugEnabled)
- log.debug("Forwarding message of type [{}] to current singleton instance at [{}]",
- Logging.simpleName(msg.getClass),
- s.path)
+ log.debug(
+ "Forwarding message of type [{}] to current singleton instance at [{}]",
+ Logging.simpleName(msg.getClass),
+ s.path)
s.forward(msg)
case None =>
buffer(msg)
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala
index ff7a9a2f10..a20d9eb3ac 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala
@@ -74,9 +74,9 @@ class ClusterClientHandoverSpec
"establish connection to first node" in {
runOn(client) {
- clusterClient =
- system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
- "client1")
+ clusterClient = system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
+ "client1")
clusterClient ! ClusterClient.Send("/user/testService", "hello", localAffinity = true)
expectMsgType[String](3.seconds) should be("hello")
}
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
index 023cc3b784..b65deb66d0 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
@@ -209,8 +209,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
"communicate to actor on any node in cluster" in within(10 seconds) {
runOn(client) {
- val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
- "client1")
+ val c = system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
+ "client1")
c ! ClusterClient.Send("/user/testService", "hello", localAffinity = true)
expectMsgType[Reply].msg should be("hello-ack")
system.stop(c)
@@ -225,8 +226,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
"work with ask" in within(10 seconds) {
runOn(client) {
import akka.pattern.ask
- val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
- "ask-client")
+ val c = system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
+ "ask-client")
implicit val timeout = Timeout(remaining)
val reply = c ? ClusterClient.Send("/user/testService", "hello-request", localAffinity = true)
Await.result(reply.mapTo[Reply], remaining).msg should be("hello-request-ack")
@@ -263,8 +265,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
//#client
runOn(client) {
- val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
- "client")
+ val c = system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
+ "client")
c ! ClusterClient.Send("/user/serviceA", "hello", localAffinity = true)
c ! ClusterClient.SendToAll("/user/serviceB", "hi")
}
@@ -277,8 +280,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
lazy val docOnly = { //not used, only demo
//#initialContacts
- val initialContacts = Set(ActorPath.fromString("akka.tcp://OtherSys@host1:2552/system/receptionist"),
- ActorPath.fromString("akka.tcp://OtherSys@host2:2552/system/receptionist"))
+ val initialContacts = Set(
+ ActorPath.fromString("akka.tcp://OtherSys@host1:2552/system/receptionist"),
+ ActorPath.fromString("akka.tcp://OtherSys@host2:2552/system/receptionist"))
val settings = ClusterClientSettings(system).withInitialContacts(initialContacts)
//#initialContacts
}
@@ -370,8 +374,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
runOn(client) {
val client =
- system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
- "client2")
+ system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
+ "client2")
client ! ClusterClient.Send("/user/service2", "bonjour", localAffinity = true)
val reply = expectMsgType[Reply]
@@ -409,8 +414,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
"re-establish connection to receptionist after partition" in within(30 seconds) {
runOn(client) {
- val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
- "client3")
+ val c = system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
+ "client3")
c ! ClusterClient.Send("/user/service2", "bonjour2", localAffinity = true)
val reply = expectMsgType[Reply]
@@ -455,8 +461,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
node(r) / "system" / "receptionist"
}
val c =
- system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(remainingContacts)),
- "client4")
+ system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(remainingContacts)),
+ "client4")
c ! ClusterClient.Send("/user/service2", "bonjour4", localAffinity = true)
expectMsg(10.seconds, Reply("bonjour4-ack", remainingContacts.head.address))
@@ -481,8 +488,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
Await.ready(system.whenTerminated, 20.seconds)
// start new system on same port
val port = Cluster(system).selfAddress.port.get
- val sys2 = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val sys2 = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.artery.canonical.port=$port
akka.remote.netty.tcp.port=$port
""").withFallback(system.settings.config))
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala
index 9d57cd5fdb..e869f21322 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala
@@ -86,8 +86,9 @@ class ClusterClientStopSpec extends MultiNodeSpec(ClusterClientStopSpec) with ST
"stop if re-establish fails for too long time" in within(20.seconds) {
runOn(client) {
- val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
- "client1")
+ val c = system.actorOf(
+ ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
+ "client1")
c ! ClusterClient.Send("/user/testService", "hello", localAffinity = true)
expectMsgType[String](3.seconds) should be("hello")
enterBarrier("was-in-contact")
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala
index 3c3ae8fecd..b389a80868 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala
@@ -77,9 +77,10 @@ class ClusterSingletonManagerChaosSpec
def createSingleton(): ActorRef = {
system.actorOf(
- ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Echo], testActor),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system)),
name = "echo")
}
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala
index 083d05fb48..0fa5395352 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala
@@ -75,9 +75,10 @@ class ClusterSingletonManagerDownedSpec
def createSingleton(): ActorRef = {
system.actorOf(
- ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Echo], testActor),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system)),
name = "echo")
}
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
index 9668febb25..356569026d 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
@@ -75,9 +75,10 @@ class ClusterSingletonManagerLeaveSpec
def createSingleton(): ActorRef = {
system.actorOf(
- ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor),
- terminationMessage = "stop",
- settings = ClusterSingletonManagerSettings(system)),
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Echo], testActor),
+ terminationMessage = "stop",
+ settings = ClusterSingletonManagerSettings(system)),
name = "echo")
}
@@ -85,9 +86,10 @@ class ClusterSingletonManagerLeaveSpec
lazy val echoProxy: ActorRef = {
echoProxyTerminatedProbe.watch(
- system.actorOf(ClusterSingletonProxy.props(singletonManagerPath = "/user/echo",
- settings = ClusterSingletonProxySettings(system)),
- name = "echoProxy"))
+ system.actorOf(
+ ClusterSingletonProxy
+ .props(singletonManagerPath = "/user/echo", settings = ClusterSingletonProxySettings(system)),
+ name = "echoProxy"))
}
"Leaving ClusterSingletonManager" must {
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
index e37252faca..4c2f86afbb 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
@@ -215,9 +215,10 @@ class ClusterSingletonManagerSpec
def createSingleton(): ActorRef = {
//#create-singleton-manager
system.actorOf(
- ClusterSingletonManager.props(singletonProps = Props(classOf[Consumer], queue, testActor),
- terminationMessage = End,
- settings = ClusterSingletonManagerSettings(system).withRole("worker")),
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Consumer], queue, testActor),
+ terminationMessage = End,
+ settings = ClusterSingletonManagerSettings(system).withRole("worker")),
name = "consumer")
//#create-singleton-manager
}
@@ -225,8 +226,9 @@ class ClusterSingletonManagerSpec
def createSingletonProxy(): ActorRef = {
//#create-singleton-proxy
val proxy = system.actorOf(
- ClusterSingletonProxy.props(singletonManagerPath = "/user/consumer",
- settings = ClusterSingletonProxySettings(system).withRole("worker")),
+ ClusterSingletonProxy.props(
+ singletonManagerPath = "/user/consumer",
+ settings = ClusterSingletonProxySettings(system).withRole("worker")),
name = "consumerProxy")
//#create-singleton-proxy
proxy
@@ -235,9 +237,9 @@ class ClusterSingletonManagerSpec
def createSingletonProxyDc(): ActorRef = {
//#create-singleton-proxy-dc
val proxyDcB = system.actorOf(
- ClusterSingletonProxy.props(singletonManagerPath = "/user/consumer",
- settings =
- ClusterSingletonProxySettings(system).withRole("worker").withDataCenter("B")),
+ ClusterSingletonProxy.props(
+ singletonManagerPath = "/user/consumer",
+ settings = ClusterSingletonProxySettings(system).withRole("worker").withDataCenter("B")),
name = "consumerProxyDcB")
//#create-singleton-proxy-dc
proxyDcB
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala
index d4b4f2c3de..e5e700787c 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala
@@ -64,16 +64,18 @@ class ClusterSingletonManagerStartupSpec
def createSingleton(): ActorRef = {
system.actorOf(
- ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Echo], testActor),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system)),
name = "echo")
}
lazy val echoProxy: ActorRef = {
- system.actorOf(ClusterSingletonProxy.props(singletonManagerPath = "/user/echo",
- settings = ClusterSingletonProxySettings(system)),
- name = "echoProxy")
+ system.actorOf(
+ ClusterSingletonProxy
+ .props(singletonManagerPath = "/user/echo", settings = ClusterSingletonProxySettings(system)),
+ name = "echoProxy")
}
"Startup of Cluster Singleton" must {
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala
index 34cae2c2b2..968d7ee44c 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala
@@ -82,10 +82,10 @@ abstract class MultiDcSingletonManagerSpec
"start a singleton instance for each data center" in {
runOn(first, second, third) {
- system.actorOf(ClusterSingletonManager.props(Props[MultiDcSingleton](),
- PoisonPill,
- ClusterSingletonManagerSettings(system).withRole(worker)),
- "singletonManager")
+ system.actorOf(
+ ClusterSingletonManager
+ .props(Props[MultiDcSingleton](), PoisonPill, ClusterSingletonManagerSettings(system).withRole(worker)),
+ "singletonManager")
}
val proxy = system.actorOf(
@@ -113,8 +113,9 @@ abstract class MultiDcSingletonManagerSpec
"be able to use proxy across different data centers" in {
runOn(third) {
val proxy = system.actorOf(
- ClusterSingletonProxy.props("/user/singletonManager",
- ClusterSingletonProxySettings(system).withRole(worker).withDataCenter("one")))
+ ClusterSingletonProxy.props(
+ "/user/singletonManager",
+ ClusterSingletonProxySettings(system).withRole(worker).withDataCenter("one")))
proxy ! MultiDcSingleton.Ping
val pong = expectMsgType[MultiDcSingleton.Pong](10.seconds)
pong.fromDc should ===("one")
diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala
index 5926bd9040..a7289399a2 100644
--- a/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala
+++ b/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala
@@ -21,9 +21,10 @@ class ClusterClientMessageSerializerSpec extends AkkaSpec {
"ClusterClientMessages" must {
"be serializable" in {
- val contactPoints = Vector("akka.tcp://system@node-1:2552/system/receptionist",
- "akka.tcp://system@node-2:2552/system/receptionist",
- "akka.tcp://system@node-3:2552/system/receptionist")
+ val contactPoints = Vector(
+ "akka.tcp://system@node-1:2552/system/receptionist",
+ "akka.tcp://system@node-2:2552/system/receptionist",
+ "akka.tcp://system@node-3:2552/system/receptionist")
checkSerialization(Contacts(contactPoints))
checkSerialization(GetContacts)
checkSerialization(Heartbeat)
diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala
index bec2b9436b..5e1891f764 100644
--- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala
+++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala
@@ -74,9 +74,10 @@ class ClusterSingletonLeavingSpeedSpec
def join(from: ActorSystem, to: ActorSystem, probe: ActorRef): Unit = {
from.actorOf(
- ClusterSingletonManager.props(singletonProps = TheSingleton.props(probe),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(from)),
+ ClusterSingletonManager.props(
+ singletonProps = TheSingleton.props(probe),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(from)),
name = "echo")
Cluster(from).join(Cluster(to).selfAddress)
diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala
index 3548b68147..fc6681b861 100644
--- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala
+++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala
@@ -44,9 +44,10 @@ object ClusterSingletonProxySpec {
cluster.registerOnMemberUp {
system.actorOf(
- ClusterSingletonManager.props(singletonProps = Props[Singleton],
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system).withRemovalMargin(5.seconds)),
+ ClusterSingletonManager.props(
+ singletonProps = Props[Singleton],
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system).withRemovalMargin(5.seconds)),
name = "singletonManager")
}
diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala
index a900cb09f3..48c1360a00 100644
--- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala
+++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala
@@ -47,16 +47,18 @@ class ClusterSingletonRestart2Spec extends AkkaSpec("""
val sys1 = ActorSystem(system.name, system.settings.config)
val sys2 = ActorSystem(system.name, system.settings.config)
- val sys3 = ActorSystem(system.name,
- ConfigFactory.parseString("akka.cluster.roles = [other]").withFallback(system.settings.config))
+ val sys3 = ActorSystem(
+ system.name,
+ ConfigFactory.parseString("akka.cluster.roles = [other]").withFallback(system.settings.config))
var sys4: ActorSystem = null
def join(from: ActorSystem, to: ActorSystem): Unit = {
if (Cluster(from).selfRoles.contains("singleton"))
from.actorOf(
- ClusterSingletonManager.props(singletonProps = ClusterSingletonRestart2Spec.singletonActorProps,
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(from).withRole("singleton")),
+ ClusterSingletonManager.props(
+ singletonProps = ClusterSingletonRestart2Spec.singletonActorProps,
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(from).withRole("singleton")),
name = "echo")
within(45.seconds) {
diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala
index a20c41c326..860d29bf4b 100644
--- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala
+++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala
@@ -36,9 +36,10 @@ class ClusterSingletonRestartSpec extends AkkaSpec("""
def join(from: ActorSystem, to: ActorSystem): Unit = {
from.actorOf(
- ClusterSingletonManager.props(singletonProps = TestActors.echoActorProps,
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(from)),
+ ClusterSingletonManager.props(
+ singletonProps = TestActors.echoActorProps,
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(from)),
name = "echo")
within(10.seconds) {
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala
index 96ba678623..c6de3cf1c6 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala
@@ -26,15 +26,17 @@ import akka.actor.typed.Terminated
import akka.cluster.ddata.typed.javadsl.{ Replicator => JReplicator }
import akka.cluster.ddata.typed.scaladsl.{ Replicator => SReplicator }
- private case class InternalChanged[A <: ReplicatedData](chg: dd.Replicator.Changed[A],
- subscriber: ActorRef[JReplicator.Changed[A]])
+ private case class InternalChanged[A <: ReplicatedData](
+ chg: dd.Replicator.Changed[A],
+ subscriber: ActorRef[JReplicator.Changed[A]])
extends JReplicator.Command
val localAskTimeout = 60.seconds // ReadLocal, WriteLocal shouldn't timeout
val additionalAskTimeout = 1.second
- def behavior(settings: dd.ReplicatorSettings,
- underlyingReplicator: Option[akka.actor.ActorRef]): Behavior[SReplicator.Command] = {
+ def behavior(
+ settings: dd.ReplicatorSettings,
+ underlyingReplicator: Option[akka.actor.ActorRef]): Behavior[SReplicator.Command] = {
Behaviors.setup { ctx =>
val untypedReplicator = underlyingReplicator match {
@@ -46,8 +48,9 @@ import akka.actor.typed.Terminated
}
def withState(
- subscribeAdapters: Map[ActorRef[JReplicator.Changed[ReplicatedData]],
- ActorRef[dd.Replicator.Changed[ReplicatedData]]]): Behavior[SReplicator.Command] = {
+ subscribeAdapters: Map[
+ ActorRef[JReplicator.Changed[ReplicatedData]],
+ ActorRef[dd.Replicator.Changed[ReplicatedData]]]): Behavior[SReplicator.Command] = {
def stopSubscribeAdapter(
subscriber: ActorRef[JReplicator.Changed[ReplicatedData]]): Behavior[SReplicator.Command] = {
@@ -65,8 +68,9 @@ import akka.actor.typed.Terminated
.receive[SReplicator.Command] { (ctx, msg) =>
msg match {
case cmd: SReplicator.Get[_] =>
- untypedReplicator.tell(dd.Replicator.Get(cmd.key, cmd.consistency, cmd.request),
- sender = cmd.replyTo.toUntyped)
+ untypedReplicator.tell(
+ dd.Replicator.Get(cmd.key, cmd.consistency, cmd.request),
+ sender = cmd.replyTo.toUntyped)
Behaviors.same
case cmd: JReplicator.Get[d] =>
@@ -91,8 +95,9 @@ import akka.actor.typed.Terminated
Behaviors.same
case cmd: SReplicator.Update[_] =>
- untypedReplicator.tell(dd.Replicator.Update(cmd.key, cmd.writeConsistency, cmd.request)(cmd.modify),
- sender = cmd.replyTo.toUntyped)
+ untypedReplicator.tell(
+ dd.Replicator.Update(cmd.key, cmd.writeConsistency, cmd.request)(cmd.modify),
+ sender = cmd.replyTo.toUntyped)
Behaviors.same
case cmd: JReplicator.Update[d] =>
@@ -102,9 +107,10 @@ import akka.actor.typed.Terminated
})
import ctx.executionContext
val reply =
- (untypedReplicator ? dd.Replicator.Update(cmd.key,
- cmd.writeConsistency.toUntyped,
- cmd.request.asScala)(cmd.modify))
+ (untypedReplicator ? dd.Replicator.Update(
+ cmd.key,
+ cmd.writeConsistency.toUntyped,
+ cmd.request.asScala)(cmd.modify))
.mapTo[dd.Replicator.UpdateResponse[d]]
.map {
case rsp: dd.Replicator.UpdateSuccess[d] => JReplicator.UpdateSuccess(rsp.key, rsp.request.asJava)
@@ -121,8 +127,9 @@ import akka.actor.typed.Terminated
case cmd: SReplicator.Subscribe[_] =>
// For the Scala API the Changed messages can be sent directly to the subscriber
- untypedReplicator.tell(dd.Replicator.Subscribe(cmd.key, cmd.subscriber.toUntyped),
- sender = cmd.subscriber.toUntyped)
+ untypedReplicator.tell(
+ dd.Replicator.Subscribe(cmd.key, cmd.subscriber.toUntyped),
+ sender = cmd.subscriber.toUntyped)
Behaviors.same
case cmd: JReplicator.Subscribe[ReplicatedData] @unchecked =>
@@ -133,8 +140,9 @@ import akka.actor.typed.Terminated
InternalChanged(chg, cmd.subscriber)
}
- untypedReplicator.tell(dd.Replicator.Subscribe(cmd.key, adapter.toUntyped),
- sender = akka.actor.ActorRef.noSender)
+ untypedReplicator.tell(
+ dd.Replicator.Subscribe(cmd.key, adapter.toUntyped),
+ sender = akka.actor.ActorRef.noSender)
ctx.watch(cmd.subscriber)
@@ -148,8 +156,9 @@ import akka.actor.typed.Terminated
stopSubscribeAdapter(cmd.subscriber)
case cmd: SReplicator.Delete[_] =>
- untypedReplicator.tell(dd.Replicator.Delete(cmd.key, cmd.consistency, cmd.request),
- sender = cmd.replyTo.toUntyped)
+ untypedReplicator.tell(
+ dd.Replicator.Delete(cmd.key, cmd.consistency, cmd.request),
+ sender = cmd.replyTo.toUntyped)
Behaviors.same
case cmd: JReplicator.Delete[d] =>
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala
index 77645907a3..f8f4153ca8 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala
@@ -121,10 +121,11 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or maintain local correlation data structures.
*/
- final case class Get[A <: ReplicatedData](key: Key[A],
- consistency: ReadConsistency,
- replyTo: ActorRef[GetResponse[A]],
- request: Optional[Any])
+ final case class Get[A <: ReplicatedData](
+ key: Key[A],
+ consistency: ReadConsistency,
+ replyTo: ActorRef[GetResponse[A]],
+ request: Optional[Any])
extends Command {
def this(key: Key[A], consistency: ReadConsistency, replyTo: ActorRef[GetResponse[A]]) =
@@ -185,10 +186,11 @@ object Replicator {
* function that only uses the data parameter and stable fields from enclosing scope. It must
* for example not access `sender()` reference of an enclosing actor.
*/
- final case class Update[A <: ReplicatedData] private (key: Key[A],
- writeConsistency: WriteConsistency,
- replyTo: ActorRef[UpdateResponse[A]],
- request: Optional[Any])(val modify: Option[A] => A)
+ final case class Update[A <: ReplicatedData] private (
+ key: Key[A],
+ writeConsistency: WriteConsistency,
+ replyTo: ActorRef[UpdateResponse[A]],
+ request: Optional[Any])(val modify: Option[A] => A)
extends Command
with NoSerializationVerificationNeeded {
@@ -199,11 +201,12 @@ object Replicator {
* If there is no current data value for the `key` the `initial` value will be
* passed to the `modify` function.
*/
- def this(key: Key[A],
- initial: A,
- writeConsistency: WriteConsistency,
- replyTo: ActorRef[UpdateResponse[A]],
- modify: JFunction[A, A]) =
+ def this(
+ key: Key[A],
+ initial: A,
+ writeConsistency: WriteConsistency,
+ replyTo: ActorRef[UpdateResponse[A]],
+ modify: JFunction[A, A]) =
this(key, writeConsistency, replyTo, Optional.empty[Any])(
Update.modifyWithInitial(initial, data => modify.apply(data)))
@@ -218,12 +221,13 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or local correlation data structures.
*/
- def this(key: Key[A],
- initial: A,
- writeConsistency: WriteConsistency,
- replyTo: ActorRef[UpdateResponse[A]],
- request: Optional[Any],
- modify: JFunction[A, A]) =
+ def this(
+ key: Key[A],
+ initial: A,
+ writeConsistency: WriteConsistency,
+ replyTo: ActorRef[UpdateResponse[A]],
+ request: Optional[Any],
+ modify: JFunction[A, A]) =
this(key, writeConsistency, replyTo, request)(Update.modifyWithInitial(initial, data => modify.apply(data)))
}
@@ -254,10 +258,11 @@ object Replicator {
* If the `modify` function of the [[Update]] throws an exception the reply message
* will be this `ModifyFailure` message. The original exception is included as `cause`.
*/
- final case class ModifyFailure[A <: ReplicatedData](key: Key[A],
- errorMessage: String,
- cause: Throwable,
- request: Optional[Any])
+ final case class ModifyFailure[A <: ReplicatedData](
+ key: Key[A],
+ errorMessage: String,
+ cause: Throwable,
+ request: Optional[Any])
extends UpdateFailure[A] {
override def toString: String = s"ModifyFailure [$key]: $errorMessage"
}
@@ -332,10 +337,11 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or maintain local correlation data structures.
*/
- final case class Delete[A <: ReplicatedData](key: Key[A],
- consistency: WriteConsistency,
- replyTo: ActorRef[DeleteResponse[A]],
- request: Optional[Any])
+ final case class Delete[A <: ReplicatedData](
+ key: Key[A],
+ consistency: WriteConsistency,
+ replyTo: ActorRef[DeleteResponse[A]],
+ request: Optional[Any])
extends Command
with NoSerializationVerificationNeeded {
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala
index 3574de80cb..0e077c9caa 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala
@@ -62,10 +62,11 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or maintain local correlation data structures.
*/
- final case class Get[A <: ReplicatedData](key: Key[A],
- consistency: ReadConsistency,
- replyTo: ActorRef[GetResponse[A]],
- request: Option[Any] = None)
+ final case class Get[A <: ReplicatedData](
+ key: Key[A],
+ consistency: ReadConsistency,
+ replyTo: ActorRef[GetResponse[A]],
+ request: Option[Any] = None)
extends Command
/**
@@ -97,11 +98,12 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or local correlation data structures.
*/
- def apply[A <: ReplicatedData](key: Key[A],
- initial: A,
- writeConsistency: WriteConsistency,
- replyTo: ActorRef[UpdateResponse[A]],
- request: Option[Any] = None)(modify: A => A): Update[A] =
+ def apply[A <: ReplicatedData](
+ key: Key[A],
+ initial: A,
+ writeConsistency: WriteConsistency,
+ replyTo: ActorRef[UpdateResponse[A]],
+ request: Option[Any] = None)(modify: A => A): Update[A] =
Update(key, writeConsistency, replyTo, request)(modifyWithInitial(initial, modify))
/**
@@ -133,10 +135,11 @@ object Replicator {
* function that only uses the data parameter and stable fields from enclosing scope. It must
* for example not access `sender()` reference of an enclosing actor.
*/
- final case class Update[A <: ReplicatedData](key: Key[A],
- writeConsistency: WriteConsistency,
- replyTo: ActorRef[UpdateResponse[A]],
- request: Option[Any])(val modify: Option[A] => A)
+ final case class Update[A <: ReplicatedData](
+ key: Key[A],
+ writeConsistency: WriteConsistency,
+ replyTo: ActorRef[UpdateResponse[A]],
+ request: Option[Any])(val modify: Option[A] => A)
extends Command
with NoSerializationVerificationNeeded {}
@@ -212,8 +215,9 @@ object Replicator {
/**
* Convenience for `ask`.
*/
- def apply[A <: ReplicatedData](key: Key[A],
- consistency: WriteConsistency): ActorRef[DeleteResponse[A]] => Delete[A] =
+ def apply[A <: ReplicatedData](
+ key: Key[A],
+ consistency: WriteConsistency): ActorRef[DeleteResponse[A]] => Delete[A] =
(replyTo => Delete(key, consistency, replyTo, None))
}
@@ -225,10 +229,11 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or maintain local correlation data structures.
*/
- final case class Delete[A <: ReplicatedData](key: Key[A],
- consistency: WriteConsistency,
- replyTo: ActorRef[DeleteResponse[A]],
- request: Option[Any])
+ final case class Delete[A <: ReplicatedData](
+ key: Key[A],
+ consistency: WriteConsistency,
+ replyTo: ActorRef[DeleteResponse[A]],
+ request: Option[Any])
extends Command
with NoSerializationVerificationNeeded
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala
index e79919ab11..894263fe7e 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala
@@ -34,21 +34,23 @@ object ClusterSingletonSettings {
// currently singleton name is required and then discarded, for example
val mgrSettings = ClusterSingletonManagerSettings(config.getConfig("singleton"))
val proxySettings = ClusterSingletonProxySettings(config.getConfig("singleton-proxy"))
- new ClusterSingletonSettings(mgrSettings.role,
- proxySettings.dataCenter,
- proxySettings.singletonIdentificationInterval,
- mgrSettings.removalMargin,
- mgrSettings.handOverRetryInterval,
- proxySettings.bufferSize)
+ new ClusterSingletonSettings(
+ mgrSettings.role,
+ proxySettings.dataCenter,
+ proxySettings.singletonIdentificationInterval,
+ mgrSettings.removalMargin,
+ mgrSettings.handOverRetryInterval,
+ proxySettings.bufferSize)
}
}
-final class ClusterSingletonSettings(val role: Option[String],
- val dataCenter: Option[DataCenter],
- val singletonIdentificationInterval: FiniteDuration,
- val removalMargin: FiniteDuration,
- val handOverRetryInterval: FiniteDuration,
- val bufferSize: Int)
+final class ClusterSingletonSettings(
+ val role: Option[String],
+ val dataCenter: Option[DataCenter],
+ val singletonIdentificationInterval: FiniteDuration,
+ val removalMargin: FiniteDuration,
+ val handOverRetryInterval: FiniteDuration,
+ val bufferSize: Int)
extends NoSerializationVerificationNeeded {
def withRole(role: String): ClusterSingletonSettings = copy(role = Some(role))
@@ -70,18 +72,20 @@ final class ClusterSingletonSettings(val role: Option[String],
def withBufferSize(bufferSize: Int): ClusterSingletonSettings = copy(bufferSize = bufferSize)
- private def copy(role: Option[String] = role,
- dataCenter: Option[DataCenter] = dataCenter,
- singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval,
- removalMargin: FiniteDuration = removalMargin,
- handOverRetryInterval: FiniteDuration = handOverRetryInterval,
- bufferSize: Int = bufferSize) =
- new ClusterSingletonSettings(role,
- dataCenter,
- singletonIdentificationInterval,
- removalMargin,
- handOverRetryInterval,
- bufferSize)
+ private def copy(
+ role: Option[String] = role,
+ dataCenter: Option[DataCenter] = dataCenter,
+ singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval,
+ removalMargin: FiniteDuration = removalMargin,
+ handOverRetryInterval: FiniteDuration = handOverRetryInterval,
+ bufferSize: Int = bufferSize) =
+ new ClusterSingletonSettings(
+ role,
+ dataCenter,
+ singletonIdentificationInterval,
+ removalMargin,
+ handOverRetryInterval,
+ bufferSize)
/**
* INTERNAL API:
@@ -148,11 +152,12 @@ object SingletonActor {
def of[M](behavior: Behavior[M], name: String): SingletonActor[M] = apply(behavior, name)
}
-final class SingletonActor[M] private (val behavior: Behavior[M],
- val name: String,
- val props: Props,
- val stopMessage: Option[M],
- val settings: Option[ClusterSingletonSettings]) {
+final class SingletonActor[M] private (
+ val behavior: Behavior[M],
+ val name: String,
+ val props: Props,
+ val stopMessage: Option[M],
+ val settings: Option[ClusterSingletonSettings]) {
/**
* [[akka.actor.typed.Props]] of the singleton actor, such as dispatcher settings.
@@ -172,10 +177,11 @@ final class SingletonActor[M] private (val behavior: Behavior[M],
*/
def withSettings(settings: ClusterSingletonSettings): SingletonActor[M] = copy(settings = Option(settings))
- private def copy(behavior: Behavior[M] = behavior,
- props: Props = props,
- stopMessage: Option[M] = stopMessage,
- settings: Option[ClusterSingletonSettings] = settings): SingletonActor[M] =
+ private def copy(
+ behavior: Behavior[M] = behavior,
+ props: Props = props,
+ stopMessage: Option[M] = stopMessage,
+ settings: Option[ClusterSingletonSettings] = settings): SingletonActor[M] =
new SingletonActor[M](behavior, name, props, stopMessage, settings)
}
@@ -214,11 +220,11 @@ object ClusterSingletonManagerSettings {
* the default configuration `akka.cluster.singleton`.
*/
def apply(config: Config): ClusterSingletonManagerSettings =
- new ClusterSingletonManagerSettings(singletonName = config.getString("singleton-name"),
- role = roleOption(config.getString("role")),
- removalMargin = Duration.Zero, // defaults to ClusterSettins.DownRemovalMargin
- handOverRetryInterval =
- config.getDuration("hand-over-retry-interval", MILLISECONDS).millis)
+ new ClusterSingletonManagerSettings(
+ singletonName = config.getString("singleton-name"),
+ role = roleOption(config.getString("role")),
+ removalMargin = Duration.Zero, // defaults to ClusterSettings.DownRemovalMargin
+ handOverRetryInterval = config.getDuration("hand-over-retry-interval", MILLISECONDS).millis)
/**
* Java API: Create settings from the default configuration
@@ -260,10 +266,11 @@ object ClusterSingletonManagerSettings {
* over has started or the previous oldest member is removed from the cluster
* (+ `removalMargin`).
*/
-final class ClusterSingletonManagerSettings(val singletonName: String,
- val role: Option[String],
- val removalMargin: FiniteDuration,
- val handOverRetryInterval: FiniteDuration)
+final class ClusterSingletonManagerSettings(
+ val singletonName: String,
+ val role: Option[String],
+ val removalMargin: FiniteDuration,
+ val handOverRetryInterval: FiniteDuration)
extends NoSerializationVerificationNeeded {
def withSingletonName(name: String): ClusterSingletonManagerSettings = copy(singletonName = name)
@@ -283,10 +290,11 @@ final class ClusterSingletonManagerSettings(val singletonName: String,
def withHandOverRetryInterval(retryInterval: java.time.Duration): ClusterSingletonManagerSettings =
withHandOverRetryInterval(retryInterval.asScala)
- private def copy(singletonName: String = singletonName,
- role: Option[String] = role,
- removalMargin: FiniteDuration = removalMargin,
- handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings =
+ private def copy(
+ singletonName: String = singletonName,
+ role: Option[String] = role,
+ removalMargin: FiniteDuration = removalMargin,
+ handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings =
new ClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval)
}
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala
index 1a0a7b4965..7d2981dd9a 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala
@@ -76,9 +76,8 @@ private[akka] object AdapterClusterImpl {
Behaviors.same
case Subscribe(subscriber, eventClass) =>
- adaptedCluster.subscribe(subscriber.toUntyped,
- initialStateMode = ClusterEvent.initialStateAsEvents,
- eventClass)
+ adaptedCluster
+ .subscribe(subscriber.toUntyped, initialStateMode = ClusterEvent.initialStateAsEvents, eventClass)
Behaviors.same
case Unsubscribe(subscriber) =>
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala
index cd85c38874..3662eaebef 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala
@@ -23,8 +23,9 @@ import akka.cluster.typed
*/
@InternalApi
private[akka] final class AdaptedClusterSingletonImpl(system: ActorSystem[_]) extends ClusterSingleton {
- require(system.isInstanceOf[ActorSystemAdapter[_]],
- "only adapted actor systems can be used for the typed cluster singleton")
+ require(
+ system.isInstanceOf[ActorSystemAdapter[_]],
+ "only adapted actor systems can be used for the typed cluster singleton")
import ClusterSingletonImpl._
import akka.actor.typed.scaladsl.adapter._
@@ -51,10 +52,12 @@ private[akka] final class AdaptedClusterSingletonImpl(system: ActorSystem[_]) ex
// start singleton on this node
val untypedProps = PropsAdapter(poisonPillInterceptor(singleton.behavior), singleton.props)
try {
- untypedSystem.systemActorOf(OldSingletonManager.props(untypedProps,
- singleton.stopMessage.getOrElse(PoisonPill),
- settings.toManagerSettings(singleton.name)),
- managerName)
+ untypedSystem.systemActorOf(
+ OldSingletonManager.props(
+ untypedProps,
+ singleton.stopMessage.getOrElse(PoisonPill),
+ settings.toManagerSettings(singleton.name)),
+ managerName)
} catch {
case ex: InvalidActorNameException if ex.getMessage.endsWith("is not unique!") =>
// This is fine. We just wanted to make sure it is running and it already is
@@ -70,9 +73,10 @@ private[akka] final class AdaptedClusterSingletonImpl(system: ActorSystem[_]) ex
println("Creating for " + singletonNameAndDc)
val (singletonName, _) = singletonNameAndDc
val proxyName = s"singletonProxy$singletonName-${settings.dataCenter.getOrElse("no-dc")}"
- untypedSystem.systemActorOf(ClusterSingletonProxy.props(s"/system/${managerNameFor(singletonName)}",
- settings.toProxySettings(singletonName)),
- proxyName)
+ untypedSystem.systemActorOf(
+ ClusterSingletonProxy
+ .props(s"/system/${managerNameFor(singletonName)}", settings.toProxySettings(singletonName)),
+ proxyName)
}
}
proxies.computeIfAbsent((name, settings.dataCenter), proxyCreator).asInstanceOf[ActorRef[T]]
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala
index 0e38abff8b..eef2946c5f 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala
@@ -82,8 +82,9 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider {
val replicatorMessageAdapter: ActorRef[Replicator.ReplicatorMessage] =
ctx.messageAdapter[Replicator.ReplicatorMessage] {
case changed: Replicator.Changed[_] @unchecked =>
- ChangeFromReplicator(changed.key.asInstanceOf[DDataKey],
- changed.dataValue.asInstanceOf[ORMultiMap[ServiceKey[_], Entry]])
+ ChangeFromReplicator(
+ changed.key.asInstanceOf[DDataKey],
+ changed.dataValue.asInstanceOf[ORMultiMap[ServiceKey[_], Entry]])
}
registry.allDdataKeys.foreach(key =>
@@ -92,9 +93,10 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider {
// remove entries when members are removed
val clusterEventMessageAdapter: ActorRef[MemberRemoved] =
ctx.messageAdapter[MemberRemoved] { case MemberRemoved(member, _) => NodeRemoved(member.uniqueAddress) }
- setup.cluster.subscribe(clusterEventMessageAdapter.toUntyped,
- ClusterEvent.InitialStateAsEvents,
- classOf[MemberRemoved])
+ setup.cluster.subscribe(
+ clusterEventMessageAdapter.toUntyped,
+ ClusterEvent.InitialStateAsEvents,
+ classOf[MemberRemoved])
// also periodic cleanup in case removal from ORMultiMap is skipped due to concurrent update,
// which is possible for OR CRDTs - done with an adapter to leverage the existing NodesRemoved message
@@ -161,13 +163,14 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider {
if (removals.nonEmpty) {
if (ctx.log.isDebugEnabled)
- ctx.log.debug("Node(s) [{}] removed, updating registry removing: [{}]",
- addresses.mkString(","),
- removals
- .map {
- case (key, entries) => key.asServiceKey.id -> entries.mkString("[", ", ", "]")
- }
- .mkString(","))
+ ctx.log.debug(
+ "Node(s) [{}] removed, updating registry removing: [{}]",
+ addresses.mkString(","),
+ removals
+ .map {
+ case (key, entries) => key.asServiceKey.id -> entries.mkString("[", ", ", "]")
+ }
+ .mkString(","))
// shard changes over the ddata keys they belong to
val removalsPerDdataKey = registry.entriesPerDdataKey(removals)
@@ -236,12 +239,13 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider {
val newRegistry = registry.withServiceRegistry(ddataKey, newState)
if (changedKeys.nonEmpty) {
if (ctx.log.isDebugEnabled) {
- ctx.log.debug("Change from replicator: [{}], changes: [{}], tombstones [{}]",
- newState.entries.entries,
- changedKeys
- .map(key => key.asServiceKey.id -> newState.entriesFor(key).mkString("[", ", ", "]"))
- .mkString(", "),
- registry.tombstones.mkString(", "))
+ ctx.log.debug(
+ "Change from replicator: [{}], changes: [{}], tombstones [{}]",
+ newState.entries.entries,
+ changedKeys
+ .map(key => key.asServiceKey.id -> newState.entriesFor(key).mkString("[", ", ", "]"))
+ .mkString(", "),
+ registry.tombstones.mkString(", "))
}
changedKeys.foreach { changedKey =>
notifySubscribersFor(changedKey, newState)
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala
index 9a30f89cce..4b69d9096c 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala
@@ -33,9 +33,10 @@ private[akka] object ClusterReceptionistSettings {
case _ => Replicator.WriteTo(config.getInt(key), writeTimeout)
}
}
- ClusterReceptionistSettings(writeConsistency,
- pruningInterval = config.getDuration("pruning-interval", MILLISECONDS).millis,
- config.getInt("distributed-key-count"))
+ ClusterReceptionistSettings(
+ writeConsistency,
+ pruningInterval = config.getDuration("pruning-interval", MILLISECONDS).millis,
+ config.getInt("distributed-key-count"))
}
}
@@ -43,6 +44,7 @@ private[akka] object ClusterReceptionistSettings {
* Internal API
*/
@InternalApi
-private[akka] case class ClusterReceptionistSettings(writeConsistency: WriteConsistency,
- pruningInterval: FiniteDuration,
- distributedKeyCount: Int)
+private[akka] case class ClusterReceptionistSettings(
+ writeConsistency: WriteConsistency,
+ pruningInterval: FiniteDuration,
+ distributedKeyCount: Int)
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala
index e53d67fae8..2494dde7ba 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala
@@ -37,8 +37,9 @@ import scala.concurrent.duration.Deadline
* the service key
* INTERNAL API
*/
-@InternalApi private[akka] final case class ShardedServiceRegistry(serviceRegistries: Map[DDataKey, ServiceRegistry],
- tombstones: Map[ActorRef[_], Deadline]) {
+@InternalApi private[akka] final case class ShardedServiceRegistry(
+ serviceRegistries: Map[DDataKey, ServiceRegistry],
+ tombstones: Map[ActorRef[_], Deadline]) {
private val keys = serviceRegistries.keySet.toArray
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala
index 7ab26cde54..7358303b88 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala
@@ -42,9 +42,10 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll with
"An ActorSystem" must {
"start the guardian actor and terminate when it terminates" in {
- val t = withSystem("a",
- Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.msg; Behaviors.stopped },
- doTerminate = false) { sys =>
+ val t = withSystem(
+ "a",
+ Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.msg; Behaviors.stopped },
+ doTerminate = false) { sys =>
val inbox = TestInbox[String]("a")
sys ! Probe("hello", inbox.ref)
eventually {
@@ -67,16 +68,17 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll with
"terminate the guardian actor" in {
val inbox = TestInbox[String]("terminate")
- val sys = system(Behaviors
- .receive[Probe] {
- case (_, _) => Behaviors.unhandled
- }
- .receiveSignal {
- case (_, PostStop) =>
- inbox.ref ! "done"
- Behaviors.same
- },
- "terminate")
+ val sys = system(
+ Behaviors
+ .receive[Probe] {
+ case (_, _) => Behaviors.unhandled
+ }
+ .receiveSignal {
+ case (_, PostStop) =>
+ inbox.ref ! "done"
+ Behaviors.same
+ },
+ "terminate")
sys.terminate().futureValue
inbox.receiveAll() should ===("done" :: Nil)
}
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala
index 2a41a17f3d..98763c5e40 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala
@@ -93,8 +93,9 @@ class ClusterSingletonApiSpec extends ScalaTestWithActorTestKit(ClusterSingleton
val clusterNode1 = Cluster(system)
val untypedSystem1 = system.toUntyped
- val system2 = akka.actor.ActorSystem(system.name,
- ConfigFactory.parseString("""
+ val system2 = akka.actor.ActorSystem(
+ system.name,
+ ConfigFactory.parseString("""
akka.cluster.roles = ["singleton"]
""").withFallback(system.settings.config))
val adaptedSystem2 = system2.toTyped
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala
index 3464fbedf9..014cd13ef6 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala
@@ -35,18 +35,18 @@ object ClusterSingletonPersistenceSpec {
private final case object StopPlz extends Command
val persistentActor: Behavior[Command] =
- EventSourcedBehavior[Command, String, String](persistenceId = PersistenceId("TheSingleton"),
- emptyState = "",
- commandHandler = (state, cmd) =>
- cmd match {
- case Add(s) => Effect.persist(s)
- case Get(replyTo) =>
- replyTo ! state
- Effect.none
- case StopPlz => Effect.stop()
- },
- eventHandler =
- (state, evt) => if (state.isEmpty) evt else state + "|" + evt)
+ EventSourcedBehavior[Command, String, String](
+ persistenceId = PersistenceId("TheSingleton"),
+ emptyState = "",
+ commandHandler = (state, cmd) =>
+ cmd match {
+ case Add(s) => Effect.persist(s)
+ case Get(replyTo) =>
+ replyTo ! state
+ Effect.none
+ case StopPlz => Effect.stop()
+ },
+ eventHandler = (state, evt) => if (state.isEmpty) evt else state + "|" + evt)
}
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala
index 48e6cdaae0..2a2c4420fc 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala
@@ -86,9 +86,10 @@ class RemoteDeployNotAllowedSpec
}
val system2 =
- ActorSystem(guardianBehavior,
- system.name,
- RemoteDeployNotAllowedSpec.configWithRemoteDeployment(node1.selfMember.address.port.get))
+ ActorSystem(
+ guardianBehavior,
+ system.name,
+ RemoteDeployNotAllowedSpec.configWithRemoteDeployment(node1.selfMember.address.port.get))
try {
val node2 = Cluster(system2)
node2.manager ! Join(node1.selfMember.address)
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala
index 92deca471e..26f1a5c552 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala
@@ -252,17 +252,19 @@ class ClusterReceptionistSpec extends WordSpec with Matchers {
try {
val system3 = testKit3.system
- system1.log.debug("Starting system3 at same hostname port as system2, uid: [{}]",
- Cluster(system3).selfMember.uniqueAddress.longUid)
+ system1.log.debug(
+ "Starting system3 at same hostname port as system2, uid: [{}]",
+ Cluster(system3).selfMember.uniqueAddress.longUid)
val clusterNode3 = Cluster(system3)
clusterNode3.manager ! Join(clusterNode1.selfMember.address)
val regProbe3 = TestProbe[Any]()(system3)
// and registers the same service key
val service3 = testKit3.spawn(pingPongBehavior, "instance")
- system3.log.debug("Spawning/registering ping service in new incarnation {}#{}",
- service3.path,
- service3.path.uid)
+ system3.log.debug(
+ "Spawning/registering ping service in new incarnation {}#{}",
+ service3.path,
+ service3.path.uid)
system3.receptionist ! Register(PingKey, service3, regProbe3.ref)
regProbe3.expectMessage(Registered(PingKey, service3))
system3.log.debug("Registered actor [{}#{}] for system3", service3.path, service3.path.uid)
diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala
index 2ccacfae88..f5bad04fbf 100644
--- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala
+++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala
@@ -181,15 +181,12 @@ class BasicClusterManualSpec extends WordSpec with ScalaFutures with Eventually
probe1.expectMessageType[MemberUp].member.address shouldEqual cluster3.selfMember.address
}
eventually {
- cluster1.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up,
- MemberStatus.up,
- MemberStatus.up)
- cluster2.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up,
- MemberStatus.up,
- MemberStatus.up)
- cluster3.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up,
- MemberStatus.up,
- MemberStatus.up)
+ cluster1.state.members.toList
+ .map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up, MemberStatus.up)
+ cluster2.state.members.toList
+ .map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up, MemberStatus.up)
+ cluster3.state.members.toList
+ .map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up, MemberStatus.up)
}
//#cluster-leave-example
diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala
index d37b5da8e1..19fd557385 100644
--- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala
+++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala
@@ -51,9 +51,10 @@ object SingletonCompileOnlySpec {
//#backoff
val proxyBackOff: ActorRef[CounterCommand] = singletonManager.init(
- SingletonActor(Behaviors
- .supervise(counter(0))
- .onFailure[Exception](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 0.2)),
- "GlobalCounter"))
+ SingletonActor(
+ Behaviors
+ .supervise(counter(0))
+ .onFailure[Exception](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 0.2)),
+ "GlobalCounter"))
//#backoff
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala
index 34490d67e5..3bb501cc6f 100644
--- a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala
@@ -79,10 +79,11 @@ private[cluster] class AutoDown(autoDownUnreachableAfter: FiniteDuration)
override def down(node: Address): Unit = {
require(leader)
- logInfo("Leader is auto-downing unreachable node [{}]. " +
- "Don't use auto-down feature of Akka Cluster in production. " +
- "See 'Auto-downing (DO NOT USE)' section of Akka Cluster documentation.",
- node)
+ logInfo(
+ "Leader is auto-downing unreachable node [{}]. " +
+ "Don't use auto-down feature of Akka Cluster in production. " +
+ "See 'Auto-downing (DO NOT USE)' section of Akka Cluster documentation.",
+ node)
cluster.down(node)
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
index 006e7ce093..70097c4331 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
@@ -113,9 +113,10 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
val crossDcFailureDetector: FailureDetectorRegistry[Address] = {
val createFailureDetector = () =>
- FailureDetectorLoader.load(settings.MultiDataCenter.CrossDcFailureDetectorSettings.ImplementationClass,
- settings.MultiDataCenter.CrossDcFailureDetectorSettings.config,
- system)
+ FailureDetectorLoader.load(
+ settings.MultiDataCenter.CrossDcFailureDetectorSettings.ImplementationClass,
+ settings.MultiDataCenter.CrossDcFailureDetectorSettings.config,
+ system)
new DefaultFailureDetectorRegistry(createFailureDetector)
}
@@ -133,10 +134,11 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
*/
private[cluster] val scheduler: Scheduler = {
if (system.scheduler.maxFrequency < 1.second / SchedulerTickDuration) {
- logInfo("Using a dedicated scheduler for cluster. Default scheduler can be used if configured " +
- "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].",
- (1000 / system.scheduler.maxFrequency).toInt,
- SchedulerTickDuration.toMillis)
+ logInfo(
+ "Using a dedicated scheduler for cluster. Default scheduler can be used if configured " +
+ "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].",
+ (1000 / system.scheduler.maxFrequency).toInt,
+ SchedulerTickDuration.toMillis)
val cfg = ConfigFactory
.parseString(s"akka.scheduler.tick-duration=${SchedulerTickDuration.toMillis}ms")
@@ -146,10 +148,10 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
case tf => tf
}
system.dynamicAccess
- .createInstanceFor[Scheduler](system.settings.SchedulerClass,
- immutable.Seq(classOf[Config] -> cfg,
- classOf[LoggingAdapter] -> log,
- classOf[ThreadFactory] -> threadFactory))
+ .createInstanceFor[Scheduler](
+ system.settings.SchedulerClass,
+ immutable
+ .Seq(classOf[Config] -> cfg, classOf[LoggingAdapter] -> log, classOf[ThreadFactory] -> threadFactory))
.get
} else {
// delegate to system.scheduler, but don't close over system
@@ -255,8 +257,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
*/
@varargs def subscribe(subscriber: ActorRef, initialStateMode: SubscriptionInitialStateMode, to: Class[_]*): Unit = {
require(to.length > 0, "at least one `ClusterDomainEvent` class is required")
- require(to.forall(classOf[ClusterDomainEvent].isAssignableFrom),
- s"subscribe to `akka.cluster.ClusterEvent.ClusterDomainEvent` or subclasses, was [${to.map(_.getName).mkString(", ")}]")
+ require(
+ to.forall(classOf[ClusterDomainEvent].isAssignableFrom),
+ s"subscribe to `akka.cluster.ClusterEvent.ClusterDomainEvent` or subclasses, was [${to.map(_.getName).mkString(", ")}]")
clusterCore ! InternalClusterAction.Subscribe(subscriber, initialStateMode, to.toSet)
}
@@ -518,12 +521,13 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter)
log.error(cause, "Cluster Node [{}] - " + template, selfAddress, arg1, arg2, arg3)
else
- log.error(cause,
- "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template,
- selfAddress,
- arg1,
- arg2,
- arg3)
+ log.error(
+ cause,
+ "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template,
+ selfAddress,
+ arg1,
+ arg2,
+ arg3)
}
private def logAtLevel(logLevel: LogLevel, message: String): Unit = {
@@ -554,12 +558,13 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter)
log.log(logLevel, "Cluster Node [{}] - " + template, selfAddress, arg1, arg2, arg3)
else
- log.log(logLevel,
- "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template,
- selfAddress,
- arg1,
- arg2,
- arg3)
+ log.log(
+ logLevel,
+ "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template,
+ selfAddress,
+ arg1,
+ arg2,
+ arg3)
private def isLevelEnabled(logLevel: LogLevel): Boolean =
LogInfo || logLevel < Logging.InfoLevel
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala
index 9b458732a9..3c5681e1cc 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala
@@ -26,10 +26,11 @@ import com.typesafe.config.ConfigFactory
* extension, i.e. the cluster will automatically be started when
* the `ClusterActorRefProvider` is used.
*/
-private[akka] class ClusterActorRefProvider(_systemName: String,
- _settings: ActorSystem.Settings,
- _eventStream: EventStream,
- _dynamicAccess: DynamicAccess)
+private[akka] class ClusterActorRefProvider(
+ _systemName: String,
+ _settings: ActorSystem.Settings,
+ _eventStream: EventStream,
+ _dynamicAccess: DynamicAccess)
extends RemoteActorRefProvider(_systemName, _settings, _eventStream, _dynamicAccess) {
override def init(system: ActorSystemImpl): Unit = {
@@ -46,10 +47,11 @@ private[akka] class ClusterActorRefProvider(_systemName: String,
import remoteSettings._
val failureDetector = createRemoteWatcherFailureDetector(system)
system.systemActorOf(
- ClusterRemoteWatcher.props(failureDetector,
- heartbeatInterval = WatchHeartBeatInterval,
- unreachableReaperInterval = WatchUnreachableReaperInterval,
- heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter),
+ ClusterRemoteWatcher.props(
+ failureDetector,
+ heartbeatInterval = WatchHeartBeatInterval,
+ unreachableReaperInterval = WatchUnreachableReaperInterval,
+ heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter),
"remote-watcher")
}
@@ -93,12 +95,14 @@ private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: Dynami
deploy.routerConfig match {
case r: Pool =>
Some(
- deploy.copy(routerConfig = ClusterRouterPool(r, ClusterRouterPoolSettings.fromConfig(deploy.config)),
- scope = ClusterScope))
+ deploy.copy(
+ routerConfig = ClusterRouterPool(r, ClusterRouterPoolSettings.fromConfig(deploy.config)),
+ scope = ClusterScope))
case r: Group =>
Some(
- deploy.copy(routerConfig = ClusterRouterGroup(r, ClusterRouterGroupSettings.fromConfig(deploy.config)),
- scope = ClusterScope))
+ deploy.copy(
+ routerConfig = ClusterRouterGroup(r, ClusterRouterGroupSettings.fromConfig(deploy.config)),
+ scope = ClusterScope))
case other =>
throw new IllegalArgumentException(
s"Cluster aware router can only wrap Pool or Group, got [${other.getClass.getName}]")
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala
index f6bdc2626d..2a3f8c9a59 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala
@@ -223,8 +223,9 @@ private[cluster] final class ClusterDaemon(joinConfigCompatChecker: JoinConfigCo
context.actorOf(
Props(classOf[ClusterCoreSupervisor], joinConfigCompatChecker).withDispatcher(context.props.dispatcher),
name = "core"))
- context.actorOf(Props[ClusterHeartbeatReceiver].withDispatcher(context.props.dispatcher),
- name = "heartbeatReceiver")
+ context.actorOf(
+ Props[ClusterHeartbeatReceiver].withDispatcher(context.props.dispatcher),
+ name = "heartbeatReceiver")
}
def receive = {
@@ -266,10 +267,9 @@ private[cluster] final class ClusterCoreSupervisor(joinConfigCompatChecker: Join
val publisher =
context.actorOf(Props[ClusterDomainEventPublisher].withDispatcher(context.props.dispatcher), name = "publisher")
coreDaemon = Some(
- context.watch(
- context.actorOf(Props(classOf[ClusterCoreDaemon], publisher, joinConfigCompatChecker)
- .withDispatcher(context.props.dispatcher),
- name = "daemon")))
+ context.watch(context.actorOf(
+ Props(classOf[ClusterCoreDaemon], publisher, joinConfigCompatChecker).withDispatcher(context.props.dispatcher),
+ name = "daemon")))
}
override val supervisorStrategy =
@@ -322,15 +322,17 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
protected def selfUniqueAddress = cluster.selfUniqueAddress
val vclockNode = VectorClock.Node(Gossip.vclockName(selfUniqueAddress))
- val gossipTargetSelector = new GossipTargetSelector(ReduceGossipDifferentViewProbability,
- cluster.settings.MultiDataCenter.CrossDcGossipProbability)
+ val gossipTargetSelector = new GossipTargetSelector(
+ ReduceGossipDifferentViewProbability,
+ cluster.settings.MultiDataCenter.CrossDcGossipProbability)
// note that self is not initially member,
// and the Gossip is not versioned for this 'Node' yet
- var membershipState = MembershipState(Gossip.empty,
- cluster.selfUniqueAddress,
- cluster.settings.SelfDataCenter,
- cluster.settings.MultiDataCenter.CrossDcConnections)
+ var membershipState = MembershipState(
+ Gossip.empty,
+ cluster.selfUniqueAddress,
+ cluster.settings.SelfDataCenter,
+ cluster.settings.MultiDataCenter.CrossDcConnections)
var isCurrentlyLeader = false
@@ -379,16 +381,18 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
val gossipTask = scheduler.schedule(PeriodicTasksInitialDelay.max(GossipInterval), GossipInterval, self, GossipTick)
// start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list)
- val failureDetectorReaperTask = scheduler.schedule(PeriodicTasksInitialDelay.max(UnreachableNodesReaperInterval),
- UnreachableNodesReaperInterval,
- self,
- ReapUnreachableTick)
+ val failureDetectorReaperTask = scheduler.schedule(
+ PeriodicTasksInitialDelay.max(UnreachableNodesReaperInterval),
+ UnreachableNodesReaperInterval,
+ self,
+ ReapUnreachableTick)
// start periodic leader action management (only applies for the current leader)
- val leaderActionsTask = scheduler.schedule(PeriodicTasksInitialDelay.max(LeaderActionsInterval),
- LeaderActionsInterval,
- self,
- LeaderActionsTick)
+ val leaderActionsTask = scheduler.schedule(
+ PeriodicTasksInitialDelay.max(LeaderActionsInterval),
+ LeaderActionsInterval,
+ self,
+ LeaderActionsTick)
// start periodic publish of current stats
val publishStatsTask: Option[Cancellable] = PublishStatsInterval match {
@@ -491,10 +495,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
}
private def joinSeedNodesWasUnsuccessful(): Unit = {
- logWarning("Joining of seed-nodes [{}] was unsuccessful after configured " +
- "shutdown-after-unsuccessful-join-seed-nodes [{}]. Running CoordinatedShutdown.",
- seedNodes.mkString(", "),
- ShutdownAfterUnsuccessfulJoinSeedNodes)
+ logWarning(
+ "Joining of seed-nodes [{}] was unsuccessful after configured " +
+ "shutdown-after-unsuccessful-join-seed-nodes [{}]. Running CoordinatedShutdown.",
+ seedNodes.mkString(", "),
+ ShutdownAfterUnsuccessfulJoinSeedNodes)
joinSeedNodesDeadline = None
CoordinatedShutdown(context.system).run(CoordinatedShutdown.ClusterDowningReason)
}
@@ -572,16 +577,18 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
if (removeUnreachableWithMemberStatus.contains(selfStatus)) {
// prevents a Down and Exiting node from being used for joining
- logInfo("Sending InitJoinNack message from node [{}] to [{}] (version [{}])",
- selfAddress,
- sender(),
- joiningNodeVersion)
+ logInfo(
+ "Sending InitJoinNack message from node [{}] to [{}] (version [{}])",
+ selfAddress,
+ sender(),
+ joiningNodeVersion)
sender() ! InitJoinNack(selfAddress)
} else {
- logInfo("Sending InitJoinAck message from node [{}] to [{}] (version [{}])",
- selfAddress,
- sender(),
- joiningNodeVersion)
+ logInfo(
+ "Sending InitJoinAck message from node [{}] to [{}] (version [{}])",
+ selfAddress,
+ sender(),
+ joiningNodeVersion)
// run config compatibility check using config provided by
// joining node and current (full) config on cluster side
@@ -609,11 +616,12 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
}
case Invalid(messages) =>
// messages are only logged on the cluster side
- logWarning("Found incompatible settings when [{}] tried to join: {}. " +
- s"Self version [{}], Joining version [$joiningNodeVersion].",
- sender().path.address,
- messages.mkString(", "),
- context.system.settings.ConfigVersion)
+ logWarning(
+ "Found incompatible settings when [{}] tried to join: {}. " +
+ s"Self version [{}], Joining version [$joiningNodeVersion].",
+ sender().path.address,
+ messages.mkString(", "),
+ context.system.settings.ConfigVersion)
if (configCheckUnsupportedByJoiningNode)
ConfigCheckUnsupportedByJoiningNode
else
@@ -659,13 +667,15 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
*/
def join(address: Address): Unit = {
if (address.protocol != selfAddress.protocol)
- logWarning("Trying to join member with wrong protocol, but was ignored, expected [{}] but was [{}]",
- selfAddress.protocol,
- address.protocol)
+ logWarning(
+ "Trying to join member with wrong protocol, but was ignored, expected [{}] but was [{}]",
+ selfAddress.protocol,
+ address.protocol)
else if (address.system != selfAddress.system)
- logWarning("Trying to join member with wrong ActorSystem name, but was ignored, expected [{}] but was [{}]",
- selfAddress.system,
- address.system)
+ logWarning(
+ "Trying to join member with wrong ActorSystem name, but was ignored, expected [{}] but was [{}]",
+ selfAddress.system,
+ address.system)
else {
require(latestGossip.members.isEmpty, "Join can only be done from empty state")
@@ -704,13 +714,15 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
def joining(joiningNode: UniqueAddress, roles: Set[String]): Unit = {
val selfStatus = latestGossip.member(selfUniqueAddress).status
if (joiningNode.address.protocol != selfAddress.protocol)
- logWarning("Member with wrong protocol tried to join, but was ignored, expected [{}] but was [{}]",
- selfAddress.protocol,
- joiningNode.address.protocol)
+ logWarning(
+ "Member with wrong protocol tried to join, but was ignored, expected [{}] but was [{}]",
+ selfAddress.protocol,
+ joiningNode.address.protocol)
else if (joiningNode.address.system != selfAddress.system)
- logWarning("Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]",
- selfAddress.system,
- joiningNode.address.system)
+ logWarning(
+ "Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]",
+ selfAddress.system,
+ joiningNode.address.system)
else if (removeUnreachableWithMemberStatus.contains(selfStatus))
logInfo("Trying to join [{}] to [{}] member, ignoring. Use a member that is Up instead.", joiningNode, selfStatus)
else {
@@ -728,9 +740,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
// node restarted, same host:port as existing member, but with different uid
// safe to down and later remove existing member
// new node will retry join
- logInfo("New incarnation of existing member [{}] is trying to join. " +
- "Existing will be removed from the cluster and then new member will be allowed to join.",
- m)
+ logInfo(
+ "New incarnation of existing member [{}] is trying to join. " +
+ "Existing will be removed from the cluster and then new member will be allowed to join.",
+ m)
if (m.status != Down) {
// we can confirm it as terminated/unreachable immediately
val newReachability = latestGossip.overview.reachability.terminated(selfUniqueAddress, m.uniqueAddress)
@@ -753,9 +766,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
updateLatestGossip(newGossip)
if (joiningNode == selfUniqueAddress) {
- logInfo("Node [{}] is JOINING itself (with roles [{}]) and forming new cluster",
- joiningNode.address,
- roles.mkString(", "))
+ logInfo(
+ "Node [{}] is JOINING itself (with roles [{}]) and forming new cluster",
+ joiningNode.address,
+ roles.mkString(", "))
if (localMembers.isEmpty)
leaderActions() // important for deterministic oldest when bootstrapping
} else {
@@ -898,10 +912,11 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
val newOverview = localGossip.overview.copy(reachability = newReachability)
val newGossip = localGossip.copy(overview = newOverview)
updateLatestGossip(newGossip)
- logWarning("Marking node as TERMINATED [{}], due to quarantine. Node roles [{}]. " +
- "It must still be marked as down before it's removed.",
- node.address,
- selfRoles.mkString(","))
+ logWarning(
+ "Marking node as TERMINATED [{}], due to quarantine. Node roles [{}]. " +
+ "It must still be marked as down before it's removed.",
+ node.address,
+ selfRoles.mkString(","))
publishMembershipState()
}
}
@@ -1123,14 +1138,15 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
moveJoiningToWeaklyUp()
if (leaderActionCounter == firstNotice || leaderActionCounter % periodicNotice == 0)
- logInfo("Leader can currently not perform its duties, reachability status: [{}], member status: [{}]",
- membershipState.dcReachabilityExcludingDownedObservers,
- latestGossip.members
- .collect {
- case m if m.dataCenter == selfDc =>
- s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}"
- }
- .mkString(", "))
+ logInfo(
+ "Leader can currently not perform its duties, reachability status: [{}], member status: [{}]",
+ membershipState.dcReachabilityExcludingDownedObservers,
+ latestGossip.members
+ .collect {
+ case m if m.dataCenter == selfDc =>
+ s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}"
+ }
+ .mkString(", "))
}
} else if (isCurrentlyLeader) {
logInfo("is no longer leader")
@@ -1288,9 +1304,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
if (targets.nonEmpty) {
if (isDebugEnabled)
- logDebug("Gossip exiting members [{}] to the two oldest (per role) [{}] (singleton optimization).",
- exitingMembers.mkString(", "),
- targets.mkString(", "))
+ logDebug(
+ "Gossip exiting members [{}] to the two oldest (per role) [{}] (singleton optimization).",
+ exitingMembers.mkString(", "),
+ targets.mkString(", "))
targets.foreach(m => gossipTo(m.uniqueAddress))
}
@@ -1370,16 +1387,19 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
val (exiting, nonExiting) = newlyDetectedUnreachableMembers.partition(_.status == Exiting)
if (nonExiting.nonEmpty)
- logWarning("Marking node(s) as UNREACHABLE [{}]. Node roles [{}]",
- nonExiting.mkString(", "),
- selfRoles.mkString(", "))
+ logWarning(
+ "Marking node(s) as UNREACHABLE [{}]. Node roles [{}]",
+ nonExiting.mkString(", "),
+ selfRoles.mkString(", "))
if (exiting.nonEmpty)
- logInfo("Marking exiting node(s) as UNREACHABLE [{}]. This is expected and they will be removed.",
- exiting.mkString(", "))
+ logInfo(
+ "Marking exiting node(s) as UNREACHABLE [{}]. This is expected and they will be removed.",
+ exiting.mkString(", "))
if (newlyDetectedReachableMembers.nonEmpty)
- logInfo("Marking node(s) as REACHABLE [{}]. Node roles [{}]",
- newlyDetectedReachableMembers.mkString(", "),
- selfRoles.mkString(","))
+ logInfo(
+ "Marking node(s) as REACHABLE [{}]. Node roles [{}]",
+ newlyDetectedReachableMembers.mkString(", "),
+ selfRoles.mkString(","))
publishMembershipState()
}
@@ -1449,9 +1469,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh
}
def publishInternalStats(): Unit = {
- val vclockStats = VectorClockStats(versionSize = latestGossip.version.versions.size,
- seenLatest =
- latestGossip.members.count(m => latestGossip.seenByNode(m.uniqueAddress)))
+ val vclockStats = VectorClockStats(
+ versionSize = latestGossip.version.versions.size,
+ seenLatest = latestGossip.members.count(m => latestGossip.seenByNode(m.uniqueAddress)))
publisher ! CurrentInternalStats(gossipStats, vclockStats)
}
@@ -1475,8 +1495,9 @@ private[cluster] case object IncompatibleConfigurationDetected extends Reason
* that other seed node to join existing cluster.
*/
@InternalApi
-private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSeq[Address],
- joinConfigCompatChecker: JoinConfigCompatChecker)
+private[cluster] final class FirstSeedNodeProcess(
+ seedNodes: immutable.IndexedSeq[Address],
+ joinConfigCompatChecker: JoinConfigCompatChecker)
extends Actor {
import InternalClusterAction._
import ClusterUserAction.JoinTo
@@ -1531,9 +1552,10 @@ private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSe
context.stop(self)
case Invalid(messages) if ByPassConfigCompatCheck =>
- logWarning("Cluster validated this node config, but sent back incompatible settings: {}. " +
- "Join will be performed because compatibility check is configured to not be enforced.",
- messages.mkString(", "))
+ logWarning(
+ "Cluster validated this node config, but sent back incompatible settings: {}. " +
+ "Join will be performed because compatibility check is configured to not be enforced.",
+ messages.mkString(", "))
context.parent ! JoinTo(address)
context.stop(self)
@@ -1615,8 +1637,9 @@ private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSe
*
*/
@InternalApi
-private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq[Address],
- joinConfigCompatChecker: JoinConfigCompatChecker)
+private[cluster] final class JoinSeedNodeProcess(
+ seedNodes: immutable.IndexedSeq[Address],
+ joinConfigCompatChecker: JoinConfigCompatChecker)
extends Actor {
import InternalClusterAction._
import ClusterUserAction.JoinTo
@@ -1661,9 +1684,10 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq
context.become(done)
case Invalid(messages) if ByPassConfigCompatCheck =>
- logWarning("Cluster validated this node config, but sent back incompatible settings: {}. " +
- "Join will be performed because compatibility check is configured to not be enforced.",
- messages.mkString(", "))
+ logWarning(
+ "Cluster validated this node config, but sent back incompatible settings: {}. " +
+ "Join will be performed because compatibility check is configured to not be enforced.",
+ messages.mkString(", "))
context.parent ! JoinTo(address)
context.become(done)
@@ -1711,9 +1735,10 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq
case ReceiveTimeout =>
if (attempt >= 2)
- logWarning("Couldn't join seed nodes after [{}] attempts, will try again. seed-nodes=[{}]",
- attempt,
- seedNodes.filterNot(_ == selfAddress).mkString(", "))
+ logWarning(
+ "Couldn't join seed nodes after [{}] attempts, will try again. seed-nodes=[{}]",
+ attempt,
+ seedNodes.filterNot(_ == selfAddress).mkString(", "))
// no InitJoinAck received, try again
self ! JoinSeedNode
}
@@ -1782,11 +1807,12 @@ private[cluster] class OnMemberStatusChangedListener(callback: Runnable, status:
*/
@InternalApi
@SerialVersionUID(1L)
-private[cluster] final case class GossipStats(receivedGossipCount: Long = 0L,
- mergeCount: Long = 0L,
- sameCount: Long = 0L,
- newerCount: Long = 0L,
- olderCount: Long = 0L) {
+private[cluster] final case class GossipStats(
+ receivedGossipCount: Long = 0L,
+ mergeCount: Long = 0L,
+ sameCount: Long = 0L,
+ newerCount: Long = 0L,
+ olderCount: Long = 0L) {
def incrementMergeCount(): GossipStats =
copy(mergeCount = mergeCount + 1, receivedGossipCount = receivedGossipCount + 1)
@@ -1801,19 +1827,21 @@ private[cluster] final case class GossipStats(receivedGossipCount: Long = 0L,
copy(olderCount = olderCount + 1, receivedGossipCount = receivedGossipCount + 1)
def :+(that: GossipStats): GossipStats = {
- GossipStats(this.receivedGossipCount + that.receivedGossipCount,
- this.mergeCount + that.mergeCount,
- this.sameCount + that.sameCount,
- this.newerCount + that.newerCount,
- this.olderCount + that.olderCount)
+ GossipStats(
+ this.receivedGossipCount + that.receivedGossipCount,
+ this.mergeCount + that.mergeCount,
+ this.sameCount + that.sameCount,
+ this.newerCount + that.newerCount,
+ this.olderCount + that.olderCount)
}
def :-(that: GossipStats): GossipStats = {
- GossipStats(this.receivedGossipCount - that.receivedGossipCount,
- this.mergeCount - that.mergeCount,
- this.sameCount - that.sameCount,
- this.newerCount - that.newerCount,
- this.olderCount - that.olderCount)
+ GossipStats(
+ this.receivedGossipCount - that.receivedGossipCount,
+ this.mergeCount - that.mergeCount,
+ this.sameCount - that.sameCount,
+ this.newerCount - that.newerCount,
+ this.olderCount - that.olderCount)
}
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
index f479efa1fa..9988012be0 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
@@ -64,18 +64,20 @@ object ClusterEvent {
// for binary compatibility (used to be a case class)
object CurrentClusterState
- extends AbstractFunction5[immutable.SortedSet[Member],
- Set[Member],
- Set[Address],
- Option[Address],
- Map[String, Option[Address]],
- CurrentClusterState] {
+ extends AbstractFunction5[
+ immutable.SortedSet[Member],
+ Set[Member],
+ Set[Address],
+ Option[Address],
+ Map[String, Option[Address]],
+ CurrentClusterState] {
- def apply(members: immutable.SortedSet[Member] = immutable.SortedSet.empty,
- unreachable: Set[Member] = Set.empty,
- seenBy: Set[Address] = Set.empty,
- leader: Option[Address] = None,
- roleLeaderMap: Map[String, Option[Address]] = Map.empty): CurrentClusterState =
+ def apply(
+ members: immutable.SortedSet[Member] = immutable.SortedSet.empty,
+ unreachable: Set[Member] = Set.empty,
+ seenBy: Set[Address] = Set.empty,
+ leader: Option[Address] = None,
+ roleLeaderMap: Map[String, Option[Address]] = Map.empty): CurrentClusterState =
new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap)
def unapply(cs: CurrentClusterState): Option[
@@ -90,25 +92,28 @@ object ClusterEvent {
* @param leader leader of the data center of this node
*/
@SerialVersionUID(2)
- final class CurrentClusterState(val members: immutable.SortedSet[Member],
- val unreachable: Set[Member],
- val seenBy: Set[Address],
- val leader: Option[Address],
- val roleLeaderMap: Map[String, Option[Address]],
- val unreachableDataCenters: Set[DataCenter])
- extends Product5[immutable.SortedSet[Member],
- Set[Member],
- Set[Address],
- Option[Address],
- Map[String, Option[Address]]]
+ final class CurrentClusterState(
+ val members: immutable.SortedSet[Member],
+ val unreachable: Set[Member],
+ val seenBy: Set[Address],
+ val leader: Option[Address],
+ val roleLeaderMap: Map[String, Option[Address]],
+ val unreachableDataCenters: Set[DataCenter])
+ extends Product5[
+ immutable.SortedSet[Member],
+ Set[Member],
+ Set[Address],
+ Option[Address],
+ Map[String, Option[Address]]]
with Serializable {
// for binary compatibility
- def this(members: immutable.SortedSet[Member] = immutable.SortedSet.empty,
- unreachable: Set[Member] = Set.empty,
- seenBy: Set[Address] = Set.empty,
- leader: Option[Address] = None,
- roleLeaderMap: Map[String, Option[Address]] = Map.empty) =
+ def this(
+ members: immutable.SortedSet[Member] = immutable.SortedSet.empty,
+ unreachable: Set[Member] = Set.empty,
+ seenBy: Set[Address] = Set.empty,
+ leader: Option[Address] = None,
+ roleLeaderMap: Map[String, Option[Address]] = Map.empty) =
this(members, unreachable, seenBy, leader, roleLeaderMap, Set.empty)
/**
@@ -182,11 +187,12 @@ object ClusterEvent {
new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap, unreachableDataCenters)
// for binary compatibility (used to be a case class)
- def copy(members: immutable.SortedSet[Member] = this.members,
- unreachable: Set[Member] = this.unreachable,
- seenBy: Set[Address] = this.seenBy,
- leader: Option[Address] = this.leader,
- roleLeaderMap: Map[String, Option[Address]] = this.roleLeaderMap) =
+ def copy(
+ members: immutable.SortedSet[Member] = this.members,
+ unreachable: Set[Member] = this.unreachable,
+ seenBy: Set[Address] = this.seenBy,
+ leader: Option[Address] = this.leader,
+ roleLeaderMap: Map[String, Option[Address]] = this.roleLeaderMap) =
new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap, unreachableDataCenters)
override def equals(other: Any): Boolean = other match {
@@ -379,8 +385,9 @@ object ClusterEvent {
/**
* INTERNAL API
*/
- private[cluster] def diffUnreachable(oldState: MembershipState,
- newState: MembershipState): immutable.Seq[UnreachableMember] =
+ private[cluster] def diffUnreachable(
+ oldState: MembershipState,
+ newState: MembershipState): immutable.Seq[UnreachableMember] =
if (newState eq oldState) Nil
else {
val newGossip = newState.latestGossip
@@ -396,8 +403,9 @@ object ClusterEvent {
/**
* INTERNAL API
*/
- private[cluster] def diffReachable(oldState: MembershipState,
- newState: MembershipState): immutable.Seq[ReachableMember] =
+ private[cluster] def diffReachable(
+ oldState: MembershipState,
+ newState: MembershipState): immutable.Seq[ReachableMember] =
if (newState eq oldState) Nil
else {
val newGossip = newState.latestGossip
@@ -426,8 +434,9 @@ object ClusterEvent {
/**
* INTERNAL API
*/
- private[cluster] def diffUnreachableDataCenter(oldState: MembershipState,
- newState: MembershipState): immutable.Seq[UnreachableDataCenter] = {
+ private[cluster] def diffUnreachableDataCenter(
+ oldState: MembershipState,
+ newState: MembershipState): immutable.Seq[UnreachableDataCenter] = {
if (newState eq oldState) Nil
else {
val otherDcs = (oldState.latestGossip.allDataCenters
@@ -443,8 +452,9 @@ object ClusterEvent {
/**
* INTERNAL API
*/
- private[cluster] def diffReachableDataCenter(oldState: MembershipState,
- newState: MembershipState): immutable.Seq[ReachableDataCenter] = {
+ private[cluster] def diffReachableDataCenter(
+ oldState: MembershipState,
+ newState: MembershipState): immutable.Seq[ReachableDataCenter] = {
if (newState eq oldState) Nil
else {
val otherDcs = (oldState.latestGossip.allDataCenters
@@ -460,8 +470,9 @@ object ClusterEvent {
/**
* INTERNAL API.
*/
- private[cluster] def diffMemberEvents(oldState: MembershipState,
- newState: MembershipState): immutable.Seq[MemberEvent] =
+ private[cluster] def diffMemberEvents(
+ oldState: MembershipState,
+ newState: MembershipState): immutable.Seq[MemberEvent] =
if (newState eq oldState) Nil
else {
val oldGossip = oldState.latestGossip
@@ -494,8 +505,9 @@ object ClusterEvent {
* INTERNAL API
*/
@InternalApi
- private[cluster] def diffLeader(oldState: MembershipState,
- newState: MembershipState): immutable.Seq[LeaderChanged] = {
+ private[cluster] def diffLeader(
+ oldState: MembershipState,
+ newState: MembershipState): immutable.Seq[LeaderChanged] = {
val newLeader = newState.leader
if (newLeader != oldState.leader) List(LeaderChanged(newLeader.map(_.address)))
else Nil
@@ -531,8 +543,9 @@ object ClusterEvent {
* INTERNAL API
*/
@InternalApi
- private[cluster] def diffReachability(oldState: MembershipState,
- newState: MembershipState): immutable.Seq[ReachabilityChanged] =
+ private[cluster] def diffReachability(
+ oldState: MembershipState,
+ newState: MembershipState): immutable.Seq[ReachabilityChanged] =
if (newState.overview.reachability eq oldState.overview.reachability) Nil
else List(ReachabilityChanged(newState.overview.reachability))
@@ -551,10 +564,11 @@ private[cluster] final class ClusterDomainEventPublisher
val cluster = Cluster(context.system)
val selfUniqueAddress = cluster.selfUniqueAddress
- val emptyMembershipState = MembershipState(Gossip.empty,
- cluster.selfUniqueAddress,
- cluster.settings.SelfDataCenter,
- cluster.settings.MultiDataCenter.CrossDcConnections)
+ val emptyMembershipState = MembershipState(
+ Gossip.empty,
+ cluster.selfUniqueAddress,
+ cluster.settings.SelfDataCenter,
+ cluster.settings.MultiDataCenter.CrossDcConnections)
var membershipState: MembershipState = emptyMembershipState
def selfDc = cluster.settings.SelfDataCenter
@@ -593,14 +607,15 @@ private[cluster] final class ClusterDomainEventPublisher
if (!membershipState.latestGossip.isMultiDc) Set.empty
else membershipState.latestGossip.allDataCenters.filterNot(isReachable(membershipState, Set.empty))
- val state = new CurrentClusterState(members = membershipState.latestGossip.members,
- unreachable = unreachable,
- seenBy = membershipState.latestGossip.seenBy.map(_.address),
- leader = membershipState.leader.map(_.address),
- roleLeaderMap = membershipState.latestGossip.allRoles.iterator
- .map(r => r -> membershipState.roleLeader(r).map(_.address))
- .toMap,
- unreachableDataCenters)
+ val state = new CurrentClusterState(
+ members = membershipState.latestGossip.members,
+ unreachable = unreachable,
+ seenBy = membershipState.latestGossip.seenBy.map(_.address),
+ leader = membershipState.leader.map(_.address),
+ roleLeaderMap = membershipState.latestGossip.allRoles.iterator
+ .map(r => r -> membershipState.roleLeader(r).map(_.address))
+ .toMap,
+ unreachableDataCenters)
receiver ! state
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala
index 8215733762..52e58a4e11 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala
@@ -224,9 +224,10 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg
* It is immutable, but it updates the failureDetector.
*/
@InternalApi
-private[cluster] final case class ClusterHeartbeatSenderState(ring: HeartbeatNodeRing,
- oldReceiversNowUnreachable: Set[UniqueAddress],
- failureDetector: FailureDetectorRegistry[Address]) {
+private[cluster] final case class ClusterHeartbeatSenderState(
+ ring: HeartbeatNodeRing,
+ oldReceiversNowUnreachable: Set[UniqueAddress],
+ failureDetector: FailureDetectorRegistry[Address]) {
val activeReceivers: Set[UniqueAddress] = ring.myReceivers.union(oldReceiversNowUnreachable)
@@ -291,10 +292,11 @@ private[cluster] final case class ClusterHeartbeatSenderState(ring: HeartbeatNod
*
* It is immutable, i.e. the methods return new instances.
*/
-private[cluster] final case class HeartbeatNodeRing(selfAddress: UniqueAddress,
- nodes: Set[UniqueAddress],
- unreachable: Set[UniqueAddress],
- monitoredByNrOfMembers: Int) {
+private[cluster] final case class HeartbeatNodeRing(
+ selfAddress: UniqueAddress,
+ nodes: Set[UniqueAddress],
+ unreachable: Set[UniqueAddress],
+ monitoredByNrOfMembers: Int) {
require(nodes contains selfAddress, s"nodes [${nodes.mkString(", ")}] must contain selfAddress [${selfAddress}]")
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala
index 109161ef2d..7e37dabf83 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala
@@ -25,15 +25,17 @@ private[cluster] object ClusterRemoteWatcher {
/**
* Factory method for `ClusterRemoteWatcher` [[akka.actor.Props]].
*/
- def props(failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
- heartbeatExpectedResponseAfter: FiniteDuration): Props =
- Props(classOf[ClusterRemoteWatcher],
- failureDetector,
- heartbeatInterval,
- unreachableReaperInterval,
- heartbeatExpectedResponseAfter).withDeploy(Deploy.local)
+ def props(
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
+ heartbeatExpectedResponseAfter: FiniteDuration): Props =
+ Props(
+ classOf[ClusterRemoteWatcher],
+ failureDetector,
+ heartbeatInterval,
+ unreachableReaperInterval,
+ heartbeatExpectedResponseAfter).withDeploy(Deploy.local)
private final case class DelayedQuarantine(m: Member, previousStatus: MemberStatus)
extends NoSerializationVerificationNeeded
@@ -51,10 +53,11 @@ private[cluster] object ClusterRemoteWatcher {
* over responsibility from `RemoteWatcher` if a watch is added before a node is member
* of the cluster and then later becomes cluster member.
*/
-private[cluster] class ClusterRemoteWatcher(failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
- heartbeatExpectedResponseAfter: FiniteDuration)
+private[cluster] class ClusterRemoteWatcher(
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
+ heartbeatExpectedResponseAfter: FiniteDuration)
extends RemoteWatcher(failureDetector, heartbeatInterval, unreachableReaperInterval, heartbeatExpectedResponseAfter) {
import ClusterRemoteWatcher.DelayedQuarantine
@@ -110,10 +113,11 @@ private[cluster] class ClusterRemoteWatcher(failureDetector: FailureDetectorRegi
clusterNodes -= m.address
if (previousStatus == MemberStatus.Down) {
- quarantine(m.address,
- Some(m.uniqueAddress.longUid),
- s"Cluster member removed, previous status [$previousStatus]",
- harmless = false)
+ quarantine(
+ m.address,
+ Some(m.uniqueAddress.longUid),
+ s"Cluster member removed, previous status [$previousStatus]",
+ harmless = false)
} else if (arteryEnabled) {
// Don't quarantine gracefully removed members (leaving) directly,
// give Cluster Singleton some time to exchange TakeOver/HandOver messages.
@@ -133,20 +137,22 @@ private[cluster] class ClusterRemoteWatcher(failureDetector: FailureDetectorRegi
if (pendingDelayedQuarantine.nonEmpty)
pendingDelayedQuarantine.find(_.address == newIncarnation.address).foreach { oldIncarnation =>
pendingDelayedQuarantine -= oldIncarnation
- quarantine(oldIncarnation.address,
- Some(oldIncarnation.longUid),
- s"Cluster member removed, new incarnation joined",
- harmless = true)
+ quarantine(
+ oldIncarnation.address,
+ Some(oldIncarnation.longUid),
+ s"Cluster member removed, new incarnation joined",
+ harmless = true)
}
}
def delayedQuarantine(m: Member, previousStatus: MemberStatus): Unit = {
if (pendingDelayedQuarantine(m.uniqueAddress)) {
pendingDelayedQuarantine -= m.uniqueAddress
- quarantine(m.address,
- Some(m.uniqueAddress.longUid),
- s"Cluster member removed, previous status [$previousStatus]",
- harmless = true)
+ quarantine(
+ m.address,
+ Some(m.uniqueAddress.longUid),
+ s"Cluster member removed, previous status [$previousStatus]",
+ harmless = true)
}
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala
index 8b29f28a4e..603bb8c30e 100644
--- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala
@@ -57,16 +57,18 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg
val selfHeartbeat = ClusterHeartbeatSender.Heartbeat(selfAddress)
- var dataCentersState: CrossDcHeartbeatingState = CrossDcHeartbeatingState.init(selfDataCenter,
- crossDcFailureDetector,
- crossDcSettings.NrOfMonitoringActors,
- SortedSet.empty)
+ var dataCentersState: CrossDcHeartbeatingState = CrossDcHeartbeatingState.init(
+ selfDataCenter,
+ crossDcFailureDetector,
+ crossDcSettings.NrOfMonitoringActors,
+ SortedSet.empty)
// start periodic heartbeat to other nodes in cluster
- val heartbeatTask = scheduler.schedule(PeriodicTasksInitialDelay max HeartbeatInterval,
- HeartbeatInterval,
- self,
- ClusterHeartbeatSender.HeartbeatTick)
+ val heartbeatTask = scheduler.schedule(
+ PeriodicTasksInitialDelay max HeartbeatInterval,
+ HeartbeatInterval,
+ self,
+ ClusterHeartbeatSender.HeartbeatTick)
override def preStart(): Unit = {
cluster.subscribe(self, classOf[MemberEvent])
@@ -183,8 +185,9 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg
/** Idempotent, become active if this node is n-th oldest and should monitor other nodes */
private def becomeActiveIfResponsibleForHeartbeat(): Unit = {
if (!activelyMonitoring && selfIsResponsibleForCrossDcHeartbeat()) {
- log.info("Cross DC heartbeat becoming ACTIVE on this node (for DC: {}), monitoring other DCs oldest nodes",
- selfDataCenter)
+ log.info(
+ "Cross DC heartbeat becoming ACTIVE on this node (for DC: {}), monitoring other DCs oldest nodes",
+ selfDataCenter)
activelyMonitoring = true
context.become(active.orElse(introspecting))
@@ -211,10 +214,11 @@ private[akka] object CrossDcHeartbeatSender {
/** INTERNAL API */
@InternalApi
-private[cluster] final case class CrossDcHeartbeatingState(selfDataCenter: DataCenter,
- failureDetector: FailureDetectorRegistry[Address],
- nrOfMonitoredNodesPerDc: Int,
- state: Map[ClusterSettings.DataCenter, SortedSet[Member]]) {
+private[cluster] final case class CrossDcHeartbeatingState(
+ selfDataCenter: DataCenter,
+ failureDetector: FailureDetectorRegistry[Address],
+ nrOfMonitoredNodesPerDc: Int,
+ state: Map[ClusterSettings.DataCenter, SortedSet[Member]]) {
import CrossDcHeartbeatingState._
/**
@@ -309,10 +313,11 @@ private[cluster] object CrossDcHeartbeatingState {
def atLeastInUpState(m: Member): Boolean =
m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.Joining
- def init(selfDataCenter: DataCenter,
- crossDcFailureDetector: FailureDetectorRegistry[Address],
- nrOfMonitoredNodesPerDc: Int,
- members: SortedSet[Member]): CrossDcHeartbeatingState = {
+ def init(
+ selfDataCenter: DataCenter,
+ crossDcFailureDetector: FailureDetectorRegistry[Address],
+ nrOfMonitoredNodesPerDc: Int,
+ members: SortedSet[Member]): CrossDcHeartbeatingState = {
new CrossDcHeartbeatingState(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodesPerDc, state = {
// TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc
val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter)
diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala
index b67968d864..061cc72692 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala
@@ -74,24 +74,28 @@ private[cluster] final case class Gossip(
def ifTrueThrow(func: => Boolean, expected: String, actual: String): Unit =
if (func) throw new IllegalArgumentException(s"$expected, but found [$actual]")
- ifTrueThrow(members.exists(_.status == Removed),
- expected = s"Live members must not have status [$Removed]",
- actual = s"${members.filter(_.status == Removed)}")
+ ifTrueThrow(
+ members.exists(_.status == Removed),
+ expected = s"Live members must not have status [$Removed]",
+ actual = s"${members.filter(_.status == Removed)}")
val inReachabilityButNotMember = overview.reachability.allObservers.diff(members.map(_.uniqueAddress))
- ifTrueThrow(inReachabilityButNotMember.nonEmpty,
- expected = "Nodes not part of cluster in reachability table",
- actual = inReachabilityButNotMember.mkString(", "))
+ ifTrueThrow(
+ inReachabilityButNotMember.nonEmpty,
+ expected = "Nodes not part of cluster in reachability table",
+ actual = inReachabilityButNotMember.mkString(", "))
val inReachabilityVersionsButNotMember = overview.reachability.versions.keySet.diff(members.map(_.uniqueAddress))
- ifTrueThrow(inReachabilityVersionsButNotMember.nonEmpty,
- expected = "Nodes not part of cluster in reachability versions table",
- actual = inReachabilityVersionsButNotMember.mkString(", "))
+ ifTrueThrow(
+ inReachabilityVersionsButNotMember.nonEmpty,
+ expected = "Nodes not part of cluster in reachability versions table",
+ actual = inReachabilityVersionsButNotMember.mkString(", "))
val seenButNotMember = overview.seen.diff(members.map(_.uniqueAddress))
- ifTrueThrow(seenButNotMember.nonEmpty,
- expected = "Nodes not part of cluster have marked the Gossip as seen",
- actual = seenButNotMember.mkString(", "))
+ ifTrueThrow(
+ seenButNotMember.nonEmpty,
+ expected = "Nodes not part of cluster have marked the Gossip as seen",
+ actual = seenButNotMember.mkString(", "))
}
@transient private lazy val membersMap: Map[UniqueAddress, Member] =
@@ -271,8 +275,9 @@ private[cluster] final case class Gossip(
* Represents the overview of the cluster, holds the cluster convergence table and set with unreachable nodes.
*/
@SerialVersionUID(1L)
-private[cluster] final case class GossipOverview(seen: Set[UniqueAddress] = Set.empty,
- reachability: Reachability = Reachability.empty) {
+private[cluster] final case class GossipOverview(
+ seen: Set[UniqueAddress] = Set.empty,
+ reachability: Reachability = Reachability.empty) {
override def toString =
s"GossipOverview(reachability = [$reachability], seen = [${seen.mkString(", ")}])"
@@ -295,11 +300,12 @@ object GossipEnvelope {
* different in that case.
*/
@SerialVersionUID(2L)
-private[cluster] class GossipEnvelope private (val from: UniqueAddress,
- val to: UniqueAddress,
- @volatile var g: Gossip,
- serDeadline: Deadline,
- @transient @volatile var ser: () => Gossip)
+private[cluster] class GossipEnvelope private (
+ val from: UniqueAddress,
+ val to: UniqueAddress,
+ @volatile var g: Gossip,
+ serDeadline: Deadline,
+ @transient @volatile var ser: () => Gossip)
extends ClusterMessage {
def gossip: Gossip = {
diff --git a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala
index 8cd2baedc9..96ff95814c 100644
--- a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala
@@ -109,8 +109,9 @@ object JoinConfigCompatChecker {
* from the passed `requiredKeys` Seq.
*/
@InternalApi
- private[cluster] def removeSensitiveKeys(requiredKeys: im.Seq[String],
- clusterSettings: ClusterSettings): im.Seq[String] = {
+ private[cluster] def removeSensitiveKeys(
+ requiredKeys: im.Seq[String],
+ clusterSettings: ClusterSettings): im.Seq[String] = {
requiredKeys.filter { key =>
!clusterSettings.SensitiveConfigPaths.exists(s => key.startsWith(s))
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala
index c6e51696db..2e1df09fc8 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Member.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala
@@ -18,10 +18,11 @@ import scala.runtime.AbstractFunction2
* and roles.
*/
@SerialVersionUID(1L)
-class Member private[cluster] (val uniqueAddress: UniqueAddress,
- private[cluster] val upNumber: Int, // INTERNAL API
- val status: MemberStatus,
- val roles: Set[String])
+class Member private[cluster] (
+ val uniqueAddress: UniqueAddress,
+ private[cluster] val upNumber: Int, // INTERNAL API
+ val status: MemberStatus,
+ val roles: Set[String])
extends Serializable {
lazy val dataCenter: DataCenter = roles
@@ -168,9 +169,10 @@ object Member {
* INTERNAL API.
*/
@InternalApi
- private[akka] def pickHighestPriority(a: Set[Member],
- b: Set[Member],
- tombstones: Map[UniqueAddress, Long]): Set[Member] = {
+ private[akka] def pickHighestPriority(
+ a: Set[Member],
+ b: Set[Member],
+ tombstones: Map[UniqueAddress, Long]): Set[Member] = {
// group all members by Address => Seq[Member]
val groupedByAddress = (a.toSeq ++ b.toSeq).groupBy(_.uniqueAddress)
// pick highest MemberStatus
@@ -268,13 +270,14 @@ object MemberStatus {
* INTERNAL API
*/
private[cluster] val allowedTransitions: Map[MemberStatus, Set[MemberStatus]] =
- Map(Joining -> Set(WeaklyUp, Up, Leaving, Down, Removed),
- WeaklyUp -> Set(Up, Leaving, Down, Removed),
- Up -> Set(Leaving, Down, Removed),
- Leaving -> Set(Exiting, Down, Removed),
- Down -> Set(Removed),
- Exiting -> Set(Removed, Down),
- Removed -> Set.empty[MemberStatus])
+ Map(
+ Joining -> Set(WeaklyUp, Up, Leaving, Down, Removed),
+ WeaklyUp -> Set(Up, Leaving, Down, Removed),
+ Up -> Set(Leaving, Down, Removed),
+ Leaving -> Set(Exiting, Down, Removed),
+ Down -> Set(Removed),
+ Exiting -> Set(Removed, Down),
+ Removed -> Set.empty[MemberStatus])
}
object UniqueAddress extends AbstractFunction2[Address, Int, UniqueAddress] {
diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala
index 5fb6046adb..143357cb1f 100644
--- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala
@@ -30,10 +30,11 @@ import scala.util.Random
/**
* INTERNAL API
*/
-@InternalApi private[akka] final case class MembershipState(latestGossip: Gossip,
- selfUniqueAddress: UniqueAddress,
- selfDc: DataCenter,
- crossDcConnections: Int) {
+@InternalApi private[akka] final case class MembershipState(
+ latestGossip: Gossip,
+ selfUniqueAddress: UniqueAddress,
+ selfDc: DataCenter,
+ crossDcConnections: Int) {
import MembershipState._
@@ -221,8 +222,9 @@ import scala.util.Random
/**
* INTERNAL API
*/
-@InternalApi private[akka] class GossipTargetSelector(reduceGossipDifferentViewProbability: Double,
- crossDcGossipProbability: Double) {
+@InternalApi private[akka] class GossipTargetSelector(
+ reduceGossipDifferentViewProbability: Double,
+ crossDcGossipProbability: Double) {
final def gossipTarget(state: MembershipState): Option[UniqueAddress] = {
selectRandomNode(gossipTargets(state))
diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala
index 492c1b2bcb..de567464d1 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala
@@ -55,8 +55,9 @@ private[cluster] object Reachability {
*/
@SerialVersionUID(1L)
@InternalApi
-private[cluster] class Reachability private (val records: immutable.IndexedSeq[Reachability.Record],
- val versions: Map[UniqueAddress, Long])
+private[cluster] class Reachability private (
+ val records: immutable.IndexedSeq[Reachability.Record],
+ val versions: Map[UniqueAddress, Long])
extends Serializable {
import Reachability._
diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala
index 80cc9a45ca..2582883f65 100644
--- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala
@@ -225,9 +225,10 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem)
private def deserializeJoin(bytes: Array[Byte]): InternalClusterAction.Join = {
val m = cm.Join.parseFrom(bytes)
val roles = Set.empty[String] ++ m.getRolesList.asScala
- InternalClusterAction.Join(uniqueAddressFromProto(m.getNode),
- if (roles.exists(_.startsWith(ClusterSettings.DcRolePrefix))) roles
- else roles + (ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter))
+ InternalClusterAction.Join(
+ uniqueAddressFromProto(m.getNode),
+ if (roles.exists(_.startsWith(ClusterSettings.DcRolePrefix))) roles
+ else roles + (ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter))
}
private def deserializeWelcome(bytes: Array[Byte]): InternalClusterAction.Welcome = {
@@ -482,10 +483,11 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem)
}
def memberFromProto(member: cm.Member) =
- new Member(addressMapping(member.getAddressIndex),
- member.getUpNumber,
- memberStatusFromInt(member.getStatus.getNumber),
- rolesFromProto(member.getRolesIndexesList.asScala.toSeq))
+ new Member(
+ addressMapping(member.getAddressIndex),
+ member.getUpNumber,
+ memberStatusFromInt(member.getStatus.getNumber),
+ rolesFromProto(member.getRolesIndexesList.asScala.toSeq))
def rolesFromProto(roleIndexes: Seq[Integer]): Set[String] = {
var containsDc = false
@@ -525,15 +527,17 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem)
private def gossipEnvelopeFromProto(envelope: cm.GossipEnvelope): GossipEnvelope = {
val serializedGossip = envelope.getSerializedGossip
- GossipEnvelope(uniqueAddressFromProto(envelope.getFrom),
- uniqueAddressFromProto(envelope.getTo),
- Deadline.now + GossipTimeToLive,
- () => gossipFromProto(cm.Gossip.parseFrom(decompress(serializedGossip.toByteArray))))
+ GossipEnvelope(
+ uniqueAddressFromProto(envelope.getFrom),
+ uniqueAddressFromProto(envelope.getTo),
+ Deadline.now + GossipTimeToLive,
+ () => gossipFromProto(cm.Gossip.parseFrom(decompress(serializedGossip.toByteArray))))
}
private def gossipStatusFromProto(status: cm.GossipStatus): GossipStatus =
- GossipStatus(uniqueAddressFromProto(status.getFrom),
- vectorClockFromProto(status.getVersion, status.getAllHashesList.asScala.toVector))
+ GossipStatus(
+ uniqueAddressFromProto(status.getFrom),
+ vectorClockFromProto(status.getVersion, status.getAllHashesList.asScala.toVector))
def deserializeClusterRouterPool(bytes: Array[Byte]): ClusterRouterPool = {
val crp = cm.ClusterRouterPool.parseFrom(bytes)
@@ -547,14 +551,15 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem)
private def clusterRouterPoolSettingsFromProto(crps: cm.ClusterRouterPoolSettings): ClusterRouterPoolSettings = {
// For backwards compatibility, useRoles is the combination of getUseRole and getUseRolesList
- ClusterRouterPoolSettings(totalInstances = crps.getTotalInstances,
- maxInstancesPerNode = crps.getMaxInstancesPerNode,
- allowLocalRoutees = crps.getAllowLocalRoutees,
- useRoles = if (crps.hasUseRole) {
- crps.getUseRolesList.asScala.toSet + crps.getUseRole
- } else {
- crps.getUseRolesList.asScala.toSet
- })
+ ClusterRouterPoolSettings(
+ totalInstances = crps.getTotalInstances,
+ maxInstancesPerNode = crps.getMaxInstancesPerNode,
+ allowLocalRoutees = crps.getAllowLocalRoutees,
+ useRoles = if (crps.hasUseRole) {
+ crps.getUseRolesList.asScala.toSet + crps.getUseRole
+ } else {
+ crps.getUseRolesList.asScala.toSet
+ })
}
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala
index afd6619b11..c75e1089eb 100644
--- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala
@@ -33,39 +33,40 @@ import scala.collection.JavaConverters._
object ClusterRouterGroupSettings {
@deprecated("useRole has been replaced with useRoles", since = "2.5.4")
- def apply(totalInstances: Int,
- routeesPaths: immutable.Seq[String],
- allowLocalRoutees: Boolean,
- useRole: Option[String]): ClusterRouterGroupSettings =
+ def apply(
+ totalInstances: Int,
+ routeesPaths: immutable.Seq[String],
+ allowLocalRoutees: Boolean,
+ useRole: Option[String]): ClusterRouterGroupSettings =
ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRole.toSet)
@varargs
- def apply(totalInstances: Int,
- routeesPaths: immutable.Seq[String],
- allowLocalRoutees: Boolean,
- useRoles: String*): ClusterRouterGroupSettings =
+ def apply(
+ totalInstances: Int,
+ routeesPaths: immutable.Seq[String],
+ allowLocalRoutees: Boolean,
+ useRoles: String*): ClusterRouterGroupSettings =
ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.toSet)
// For backwards compatibility, useRoles is the combination of use-roles and use-role
def fromConfig(config: Config): ClusterRouterGroupSettings =
- ClusterRouterGroupSettings(totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
- routeesPaths = immutableSeq(config.getStringList("routees.paths")),
- allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"),
- useRoles = config
- .getStringList("cluster.use-roles")
- .asScala
- .toSet ++ ClusterRouterSettingsBase.useRoleOption(
- config.getString("cluster.use-role")))
+ ClusterRouterGroupSettings(
+ totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
+ routeesPaths = immutableSeq(config.getStringList("routees.paths")),
+ allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"),
+ useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption(
+ config.getString("cluster.use-role")))
}
/**
* `totalInstances` of cluster router must be > 0
*/
@SerialVersionUID(1L)
-final case class ClusterRouterGroupSettings(totalInstances: Int,
- routeesPaths: immutable.Seq[String],
- allowLocalRoutees: Boolean,
- useRoles: Set[String])
+final case class ClusterRouterGroupSettings(
+ totalInstances: Int,
+ routeesPaths: immutable.Seq[String],
+ allowLocalRoutees: Boolean,
+ useRoles: Set[String])
extends ClusterRouterSettingsBase {
// For binary compatibility
@@ -73,10 +74,11 @@ final case class ClusterRouterGroupSettings(totalInstances: Int,
def useRole: Option[String] = useRoles.headOption
@deprecated("useRole has been replaced with useRoles", since = "2.5.4")
- def this(totalInstances: Int,
- routeesPaths: immutable.Seq[String],
- allowLocalRoutees: Boolean,
- useRole: Option[String]) =
+ def this(
+ totalInstances: Int,
+ routeesPaths: immutable.Seq[String],
+ allowLocalRoutees: Boolean,
+ useRole: Option[String]) =
this(totalInstances, routeesPaths, allowLocalRoutees, useRole.toSet)
/**
@@ -89,18 +91,20 @@ final case class ClusterRouterGroupSettings(totalInstances: Int,
/**
* Java API
*/
- def this(totalInstances: Int,
- routeesPaths: java.lang.Iterable[String],
- allowLocalRoutees: Boolean,
- useRoles: java.util.Set[String]) =
+ def this(
+ totalInstances: Int,
+ routeesPaths: java.lang.Iterable[String],
+ allowLocalRoutees: Boolean,
+ useRoles: java.util.Set[String]) =
this(totalInstances, immutableSeq(routeesPaths), allowLocalRoutees, useRoles.asScala.toSet)
// For binary compatibility
@deprecated("Use constructor with useRoles instead", since = "2.5.4")
- def copy(totalInstances: Int = totalInstances,
- routeesPaths: immutable.Seq[String] = routeesPaths,
- allowLocalRoutees: Boolean = allowLocalRoutees,
- useRole: Option[String] = useRole): ClusterRouterGroupSettings =
+ def copy(
+ totalInstances: Int = totalInstances,
+ routeesPaths: immutable.Seq[String] = routeesPaths,
+ allowLocalRoutees: Boolean = allowLocalRoutees,
+ useRole: Option[String] = useRole): ClusterRouterGroupSettings =
new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRole)
if (totalInstances <= 0) throw new IllegalArgumentException("totalInstances of cluster router must be > 0")
@@ -129,29 +133,29 @@ final case class ClusterRouterGroupSettings(totalInstances: Int,
object ClusterRouterPoolSettings {
@deprecated("useRole has been replaced with useRoles", since = "2.5.4")
- def apply(totalInstances: Int,
- maxInstancesPerNode: Int,
- allowLocalRoutees: Boolean,
- useRole: Option[String]): ClusterRouterPoolSettings =
+ def apply(
+ totalInstances: Int,
+ maxInstancesPerNode: Int,
+ allowLocalRoutees: Boolean,
+ useRole: Option[String]): ClusterRouterPoolSettings =
ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole.toSet)
@varargs
- def apply(totalInstances: Int,
- maxInstancesPerNode: Int,
- allowLocalRoutees: Boolean,
- useRoles: String*): ClusterRouterPoolSettings =
+ def apply(
+ totalInstances: Int,
+ maxInstancesPerNode: Int,
+ allowLocalRoutees: Boolean,
+ useRoles: String*): ClusterRouterPoolSettings =
ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.toSet)
// For backwards compatibility, useRoles is the combination of use-roles and use-role
def fromConfig(config: Config): ClusterRouterPoolSettings =
- ClusterRouterPoolSettings(totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
- maxInstancesPerNode = config.getInt("cluster.max-nr-of-instances-per-node"),
- allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"),
- useRoles = config
- .getStringList("cluster.use-roles")
- .asScala
- .toSet ++ ClusterRouterSettingsBase.useRoleOption(
- config.getString("cluster.use-role")))
+ ClusterRouterPoolSettings(
+ totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
+ maxInstancesPerNode = config.getInt("cluster.max-nr-of-instances-per-node"),
+ allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"),
+ useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption(
+ config.getString("cluster.use-role")))
}
/**
@@ -160,10 +164,11 @@ object ClusterRouterPoolSettings {
* `maxInstancesPerNode` of cluster router must be 1 when routeesPath is defined
*/
@SerialVersionUID(1L)
-final case class ClusterRouterPoolSettings(totalInstances: Int,
- maxInstancesPerNode: Int,
- allowLocalRoutees: Boolean,
- useRoles: Set[String])
+final case class ClusterRouterPoolSettings(
+ totalInstances: Int,
+ maxInstancesPerNode: Int,
+ allowLocalRoutees: Boolean,
+ useRoles: Set[String])
extends ClusterRouterSettingsBase {
// For binary compatibility
@@ -189,10 +194,11 @@ final case class ClusterRouterPoolSettings(totalInstances: Int,
// For binary compatibility
@deprecated("Use copy with useRoles instead", since = "2.5.4")
- def copy(totalInstances: Int = totalInstances,
- maxInstancesPerNode: Int = maxInstancesPerNode,
- allowLocalRoutees: Boolean = allowLocalRoutees,
- useRole: Option[String] = useRole): ClusterRouterPoolSettings =
+ def copy(
+ totalInstances: Int = totalInstances,
+ maxInstancesPerNode: Int = maxInstancesPerNode,
+ allowLocalRoutees: Boolean = allowLocalRoutees,
+ useRole: Option[String] = useRole): ClusterRouterPoolSettings =
new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole)
if (maxInstancesPerNode <= 0)
@@ -362,8 +368,9 @@ private[akka] trait ClusterRouterConfigBase extends RouterConfig {
/**
* INTERNAL API
*/
-private[akka] class ClusterRouterPoolActor(supervisorStrategy: SupervisorStrategy,
- val settings: ClusterRouterPoolSettings)
+private[akka] class ClusterRouterPoolActor(
+ supervisorStrategy: SupervisorStrategy,
+ val settings: ClusterRouterPoolSettings)
extends RouterPoolActor(supervisorStrategy)
with ClusterRouterActor {
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala
index 9ade5c5c54..bc76143d09 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala
@@ -106,8 +106,9 @@ abstract class ClusterDeathWatchSpec
}
runOn(second, third, fourth) {
- system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
- name = "subject")
+ system.actorOf(
+ Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
+ name = "subject")
enterBarrier("subjected-started")
enterBarrier("watch-established")
runOn(third) {
@@ -157,8 +158,9 @@ abstract class ClusterDeathWatchSpec
"be able to watch actor before node joins cluster, ClusterRemoteWatcher takes over from RemoteWatcher" in within(
20 seconds) {
runOn(fifth) {
- system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
- name = "subject5")
+ system.actorOf(
+ Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
+ name = "subject5")
}
enterBarrier("subjected-started")
@@ -242,9 +244,8 @@ abstract class ClusterDeathWatchSpec
catch {
case _: TimeoutException =>
fail(
- "Failed to stop [%s] within [%s] \n%s".format(system.name,
- timeout,
- system.asInstanceOf[ActorSystemImpl].printTree))
+ "Failed to stop [%s] within [%s] \n%s"
+ .format(system.name, timeout, system.asInstanceOf[ActorSystemImpl].printTree))
}
// signal to the first node that fourth is done
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala
index c5cff78299..3f95da539f 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala
@@ -237,8 +237,9 @@ abstract class MultiDcSplitBrainSpec extends MultiNodeSpec(MultiDcSplitBrainMult
Await.ready(system.whenTerminated, remaining)
val port = Cluster(system).selfAddress.port.get
- val restartedSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val restartedSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.netty.tcp.port = $port
akka.remote.artery.canonical.port = $port
akka.coordinated-shutdown.terminate-actor-system = on
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
index ea5db00b7b..7e17f3b419 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
@@ -123,26 +123,28 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
def muteLog(sys: ActorSystem = system): Unit = {
if (!sys.log.isDebugEnabled) {
- Seq(".*Cluster Node.* - registered cluster JMX MBean.*",
- ".*Cluster Node.* - is starting up.*",
- ".*Shutting down cluster Node.*",
- ".*Cluster node successfully shut down.*",
- ".*Using a dedicated scheduler for cluster.*").foreach { s =>
+ Seq(
+ ".*Cluster Node.* - registered cluster JMX MBean.*",
+ ".*Cluster Node.* - is starting up.*",
+ ".*Shutting down cluster Node.*",
+ ".*Cluster node successfully shut down.*",
+ ".*Using a dedicated scheduler for cluster.*").foreach { s =>
sys.eventStream.publish(Mute(EventFilter.info(pattern = s)))
}
- muteDeadLetters(classOf[ClusterHeartbeatSender.Heartbeat],
- classOf[ClusterHeartbeatSender.HeartbeatRsp],
- classOf[GossipEnvelope],
- classOf[GossipStatus],
- classOf[InternalClusterAction.Tick],
- classOf[akka.actor.PoisonPill],
- classOf[akka.dispatch.sysmsg.DeathWatchNotification],
- classOf[akka.remote.transport.AssociationHandle.Disassociated],
- // akka.remote.transport.AssociationHandle.Disassociated.getClass,
- classOf[akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying],
- // akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass,
- classOf[akka.remote.transport.AssociationHandle.InboundPayload])(sys)
+ muteDeadLetters(
+ classOf[ClusterHeartbeatSender.Heartbeat],
+ classOf[ClusterHeartbeatSender.HeartbeatRsp],
+ classOf[GossipEnvelope],
+ classOf[GossipStatus],
+ classOf[InternalClusterAction.Tick],
+ classOf[akka.actor.PoisonPill],
+ classOf[akka.dispatch.sysmsg.DeathWatchNotification],
+ classOf[akka.remote.transport.AssociationHandle.Disassociated],
+ // akka.remote.transport.AssociationHandle.Disassociated.getClass,
+ classOf[akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying],
+ // akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass,
+ classOf[akka.remote.transport.AssociationHandle.InboundPayload])(sys)
}
}
@@ -299,8 +301,9 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
val expectedLeader = roleOfLeader(nodesInCluster)
val leader = clusterView.leader
val isLeader = leader == Some(clusterView.selfAddress)
- assert(isLeader == isNode(expectedLeader),
- "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members))
+ assert(
+ isLeader == isNode(expectedLeader),
+ "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members))
clusterView.status should (be(MemberStatus.Up).or(be(MemberStatus.Leaving)))
}
@@ -308,9 +311,10 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
* Wait until the expected number of members has status Up has been reached.
* Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring.
*/
- def awaitMembersUp(numberOfMembers: Int,
- canNotBePartOfMemberRing: Set[Address] = Set.empty,
- timeout: FiniteDuration = 25.seconds): Unit = {
+ def awaitMembersUp(
+ numberOfMembers: Int,
+ canNotBePartOfMemberRing: Set[Address] = Set.empty,
+ timeout: FiniteDuration = 25.seconds): Unit = {
within(timeout) {
if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set
awaitAssert(canNotBePartOfMemberRing.foreach(a => clusterView.members.map(_.address) should not contain (a)))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala
index 0f6e416785..4999556ae0 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala
@@ -64,9 +64,10 @@ abstract class QuickRestartSpec
system.name,
ConfigFactory.parseString(s"akka.cluster.roles = [round-$n]").withFallback(system.settings.config))
else
- ActorSystem(system.name,
- // use the same port
- ConfigFactory.parseString(s"""
+ ActorSystem(
+ system.name,
+ // use the same port
+ ConfigFactory.parseString(s"""
akka.cluster.roles = [round-$n]
akka.remote.netty.tcp.port = ${Cluster(restartingSystem).selfAddress.port.get}
akka.remote.artery.canonical.port = ${Cluster(restartingSystem).selfAddress.port.get}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala
index 1ca2fb2d7f..efd6f73272 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala
@@ -53,8 +53,9 @@ abstract class RestartFirstSeedNodeSpec
def missingSeed = address(seed3).copy(port = Some(61313))
def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2, seed3, missingSeed)
- lazy val restartedSeed1System = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ lazy val restartedSeed1System = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.netty.tcp.port = ${seedNodes.head.port.get}
akka.remote.artery.canonical.port = ${seedNodes.head.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala
index 22c3d1e073..a68ec970af 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala
@@ -53,8 +53,9 @@ abstract class RestartNode2SpecSpec
def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2)
// this is the node that will attempt to re-join, keep gate times low so it can retry quickly
- lazy val restartedSeed1System = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ lazy val restartedSeed1System = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.netty.tcp.port = ${seedNodes.head.port.get}
akka.remote.artery.canonical.port = ${seedNodes.head.port.get}
#akka.remote.retry-gate-closed-for = 1s
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala
index 5674c89a96..bf2f1383dc 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala
@@ -52,8 +52,9 @@ abstract class RestartNode3Spec
def seedNodes: immutable.IndexedSeq[Address] = Vector(first)
- lazy val restartedSecondSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ lazy val restartedSecondSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get}
akka.remote.netty.tcp.port = ${secondUniqueAddress.address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala
index 5708e1f48b..4d149b3250 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala
@@ -72,8 +72,9 @@ abstract class RestartNodeSpec
def seedNodes: immutable.IndexedSeq[Address] = Vector(first, secondUniqueAddress.address, third)
- lazy val restartedSecondSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ lazy val restartedSecondSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.netty.tcp.port = ${secondUniqueAddress.address.port.get}
akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
index b24374450e..18e89e3df0 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
@@ -218,8 +218,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
numberOfNodesShutdownOneByOneSmall + numberOfNodesShutdownOneByOneLarge + numberOfNodesShutdown <= totalNumberOfNodes - 3,
s"specified number of leaving/shutdown nodes <= ${totalNumberOfNodes - 3}")
- require(numberOfNodesJoinRemove <= totalNumberOfNodes,
- s"nr-of-nodes-join-remove should be <= ${totalNumberOfNodes}")
+ require(
+ numberOfNodesJoinRemove <= totalNumberOfNodes,
+ s"nr-of-nodes-join-remove should be <= ${totalNumberOfNodes}")
override def toString: String = {
testConfig.withFallback(ConfigFactory.parseString(s"nrOfNodes=${totalNumberOfNodes}")).root.render
@@ -379,10 +380,11 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
val φ = phi(node)
if (φ > 0 || cluster.failureDetector.isMonitoring(node)) {
val aboveOne = if (!φ.isInfinite && φ > 1.0) 1 else 0
- phiByNode += node -> PhiValue(node,
- previous.countAboveOne + aboveOne,
- previous.count + 1,
- math.max(previous.max, φ))
+ phiByNode += node -> PhiValue(
+ node,
+ previous.countAboveOne + aboveOne,
+ previous.count + 1,
+ math.max(previous.max, φ))
}
}
val phiSet = immutable.SortedSet.empty[PhiValue] ++ phiByNode.values
@@ -518,11 +520,12 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
def createJob(): Job = {
if (tree)
- TreeJob(idCounter.next(),
- payload,
- ThreadLocalRandom.current.nextInt(settings.treeWidth),
- settings.treeLevels,
- settings.treeWidth)
+ TreeJob(
+ idCounter.next(),
+ payload,
+ ThreadLocalRandom.current.nextInt(settings.treeWidth),
+ settings.treeLevels,
+ settings.treeWidth)
else SimpleJob(idCounter.next(), payload)
}
@@ -549,10 +552,11 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
case TreeJob(id, payload, idx, levels, width) =>
// create the actors when first TreeJob message is received
val totalActors = ((width * math.pow(width, levels) - 1) / (width - 1)).toInt
- log.debug("Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children",
- totalActors,
- levels,
- width)
+ log.debug(
+ "Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children",
+ totalActors,
+ levels,
+ width)
val tree = context.actorOf(Props(classOf[TreeNode], levels, width), "tree")
tree.forward((idx, SimpleJob(id, payload)))
context.become(treeWorker(tree))
@@ -610,8 +614,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
case e: Exception => context.children.foreach { _ ! e }
case GetChildrenCount => sender() ! ChildrenCount(context.children.size, restartCount)
case Reset =>
- require(context.children.isEmpty,
- s"ResetChildrenCount not allowed when children exists, [${context.children.size}]")
+ require(
+ context.children.isEmpty,
+ s"ResetChildrenCount not allowed when children exists, [${context.children.size}]")
restartCount = 0
}
}
@@ -700,12 +705,13 @@ abstract class StressSpec
override def muteLog(sys: ActorSystem = system): Unit = {
super.muteLog(sys)
sys.eventStream.publish(Mute(EventFilter[RuntimeException](pattern = ".*Simulated exception.*")))
- muteDeadLetters(classOf[SimpleJob],
- classOf[AggregatedClusterResult],
- SendBatch.getClass,
- classOf[StatsResult],
- classOf[PhiResult],
- RetryTick.getClass)(sys)
+ muteDeadLetters(
+ classOf[SimpleJob],
+ classOf[AggregatedClusterResult],
+ SendBatch.getClass,
+ classOf[StatsResult],
+ classOf[PhiResult],
+ RetryTick.getClass)(sys)
}
override protected def afterTermination(): Unit = {
@@ -974,9 +980,10 @@ abstract class StressSpec
val usedRoles = roles.take(nbrUsedRoles)
val usedAddresses = usedRoles.map(address(_)).toSet
- @tailrec def loop(counter: Int,
- previousAS: Option[ActorSystem],
- allPreviousAddresses: Set[Address]): Option[ActorSystem] = {
+ @tailrec def loop(
+ counter: Int,
+ previousAS: Option[ActorSystem],
+ allPreviousAddresses: Set[Address]): Option[ActorSystem] = {
if (counter > rounds) previousAS
else {
val t = title + " round " + counter
@@ -998,9 +1005,10 @@ abstract class StressSpec
Some(sys)
} else previousAS
runOn(usedRoles: _*) {
- awaitMembersUp(nbrUsedRoles + activeRoles.size,
- canNotBePartOfMemberRing = allPreviousAddresses,
- timeout = remainingOrDefault)
+ awaitMembersUp(
+ nbrUsedRoles + activeRoles.size,
+ canNotBePartOfMemberRing = allPreviousAddresses,
+ timeout = remainingOrDefault)
awaitAllReachable()
}
val nextAddresses = clusterView.members.map(_.address).diff(usedAddresses)
@@ -1041,11 +1049,12 @@ abstract class StressSpec
identifyProbe.expectMsgType[ActorIdentity].ref
}
- def exerciseRouters(title: String,
- duration: FiniteDuration,
- batchInterval: FiniteDuration,
- expectDroppedMessages: Boolean,
- tree: Boolean): Unit =
+ def exerciseRouters(
+ title: String,
+ duration: FiniteDuration,
+ batchInterval: FiniteDuration,
+ expectDroppedMessages: Boolean,
+ tree: Boolean): Unit =
within(duration + 10.seconds) {
nbrUsedRoles should ===(totalNumberOfNodes)
createResultAggregator(title, expectedResults = nbrUsedRoles, includeInHistory = false)
@@ -1053,8 +1062,9 @@ abstract class StressSpec
val (masterRoles, otherRoles) = roles.take(nbrUsedRoles).splitAt(3)
runOn(masterRoles: _*) {
reportResult {
- val m = system.actorOf(Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local),
- name = masterName)
+ val m = system.actorOf(
+ Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local),
+ name = masterName)
m ! Begin
import system.dispatcher
system.scheduler.scheduleOnce(duration) {
@@ -1081,11 +1091,12 @@ abstract class StressSpec
def awaitWorkResult(m: ActorRef): WorkResult = {
val workResult = expectMsgType[WorkResult]
if (settings.infolog)
- log.info("{} result, [{}] jobs/s, retried [{}] of [{}] msg",
- masterName,
- workResult.jobsPerSecond.form,
- workResult.retryCount,
- workResult.sendCount)
+ log.info(
+ "{} result, [{}] jobs/s, retried [{}] of [{}] msg",
+ masterName,
+ workResult.jobsPerSecond.form,
+ workResult.retryCount,
+ workResult.sendCount)
watch(m)
expectTerminated(m)
workResult
@@ -1190,8 +1201,9 @@ abstract class StressSpec
"start routers that are running while nodes are joining" taggedAs LongRunningTest in {
runOn(roles.take(3): _*) {
- system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
- name = masterName) ! Begin
+ system.actorOf(
+ Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
+ name = masterName) ! Begin
}
}
@@ -1238,44 +1250,48 @@ abstract class StressSpec
"use routers with normal throughput" taggedAs LongRunningTest in {
if (exerciseActors) {
- exerciseRouters("use routers with normal throughput",
- normalThroughputDuration,
- batchInterval = workBatchInterval,
- expectDroppedMessages = false,
- tree = false)
+ exerciseRouters(
+ "use routers with normal throughput",
+ normalThroughputDuration,
+ batchInterval = workBatchInterval,
+ expectDroppedMessages = false,
+ tree = false)
}
enterBarrier("after-" + step)
}
"use routers with high throughput" taggedAs LongRunningTest in {
if (exerciseActors) {
- exerciseRouters("use routers with high throughput",
- highThroughputDuration,
- batchInterval = Duration.Zero,
- expectDroppedMessages = false,
- tree = false)
+ exerciseRouters(
+ "use routers with high throughput",
+ highThroughputDuration,
+ batchInterval = Duration.Zero,
+ expectDroppedMessages = false,
+ tree = false)
}
enterBarrier("after-" + step)
}
"use many actors with normal throughput" taggedAs LongRunningTest in {
if (exerciseActors) {
- exerciseRouters("use many actors with normal throughput",
- normalThroughputDuration,
- batchInterval = workBatchInterval,
- expectDroppedMessages = false,
- tree = true)
+ exerciseRouters(
+ "use many actors with normal throughput",
+ normalThroughputDuration,
+ batchInterval = workBatchInterval,
+ expectDroppedMessages = false,
+ tree = true)
}
enterBarrier("after-" + step)
}
"use many actors with high throughput" taggedAs LongRunningTest in {
if (exerciseActors) {
- exerciseRouters("use many actors with high throughput",
- highThroughputDuration,
- batchInterval = Duration.Zero,
- expectDroppedMessages = false,
- tree = true)
+ exerciseRouters(
+ "use many actors with high throughput",
+ highThroughputDuration,
+ batchInterval = Duration.Zero,
+ expectDroppedMessages = false,
+ tree = true)
}
enterBarrier("after-" + step)
}
@@ -1300,8 +1316,9 @@ abstract class StressSpec
"start routers that are running while nodes are removed" taggedAs LongRunningTest in {
if (exerciseActors) {
runOn(roles.take(3): _*) {
- system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
- name = masterName) ! Begin
+ system.actorOf(
+ Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
+ name = masterName) ! Begin
}
}
enterBarrier("after-" + step)
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala
index 100f638a62..5cdd5b6757 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala
@@ -122,10 +122,10 @@ abstract class ClusterConsistentHashingRouterSpec
"deploy programatically defined routees to the member nodes in the cluster" in {
runOn(first) {
val router2 = system.actorOf(
- ClusterRouterPool(local = ConsistentHashingPool(nrOfInstances = 0),
- settings = ClusterRouterPoolSettings(totalInstances = 10,
- maxInstancesPerNode = 2,
- allowLocalRoutees = true)).props(Props[Echo]),
+ ClusterRouterPool(
+ local = ConsistentHashingPool(nrOfInstances = 0),
+ settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true))
+ .props(Props[Echo]),
"router2")
// it may take some time until router receives cluster member events
awaitAssert { currentRoutees(router2).size should ===(6) }
@@ -143,8 +143,9 @@ abstract class ClusterConsistentHashingRouterSpec
}
val router3 =
- system.actorOf(ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping).props(Props[Echo]),
- "router3")
+ system.actorOf(
+ ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping).props(Props[Echo]),
+ "router3")
assertHashMapping(router3)
}
@@ -159,12 +160,13 @@ abstract class ClusterConsistentHashingRouterSpec
}
val router4 =
- system.actorOf(ClusterRouterPool(local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping),
- settings =
- ClusterRouterPoolSettings(totalInstances = 10,
- maxInstancesPerNode = 1,
- allowLocalRoutees = true)).props(Props[Echo]),
- "router4")
+ system.actorOf(
+ ClusterRouterPool(
+ local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping),
+ settings =
+ ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true))
+ .props(Props[Echo]),
+ "router4")
assertHashMapping(router4)
}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala
index 61ff8c65a5..34174cb309 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala
@@ -118,8 +118,9 @@ abstract class ClusterRoundRobinSpec
lazy val router1 = system.actorOf(FromConfig.props(Props[SomeActor]), "router1")
lazy val router2 = system.actorOf(
- ClusterRouterPool(RoundRobinPool(nrOfInstances = 0),
- ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true))
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 0),
+ ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true))
.props(Props[SomeActor]),
"router2")
lazy val router3 = system.actorOf(FromConfig.props(Props[SomeActor]), "router3")
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala
index 9314c6c27d..06a6eadb3c 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala
@@ -104,11 +104,13 @@ abstract class UseRoleIgnoredSpec
val roles = Set("b")
val router = system.actorOf(
- ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6,
- maxInstancesPerNode = 2,
- allowLocalRoutees = false,
- useRoles = roles)).props(Props[SomeActor]),
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(
+ totalInstances = 6,
+ maxInstancesPerNode = 2,
+ allowLocalRoutees = false,
+ useRoles = roles)).props(Props[SomeActor]),
"router-2")
awaitAssert(currentRoutees(router).size should ===(4))
@@ -134,13 +136,15 @@ abstract class UseRoleIgnoredSpec
runOn(first) {
val roles = Set("b")
- val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
- ClusterRouterGroupSettings(totalInstances = 6,
- routeesPaths =
- List("/user/foo", "/user/bar"),
- allowLocalRoutees = false,
- useRoles = roles)).props,
- "router-2b")
+ val router = system.actorOf(
+ ClusterRouterGroup(
+ RoundRobinGroup(paths = Nil),
+ ClusterRouterGroupSettings(
+ totalInstances = 6,
+ routeesPaths = List("/user/foo", "/user/bar"),
+ allowLocalRoutees = false,
+ useRoles = roles)).props,
+ "router-2b")
awaitAssert(currentRoutees(router).size should ===(4))
@@ -166,11 +170,13 @@ abstract class UseRoleIgnoredSpec
val roles = Set("b")
val router = system.actorOf(
- ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6,
- maxInstancesPerNode = 2,
- allowLocalRoutees = true,
- useRoles = roles)).props(Props[SomeActor]),
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(
+ totalInstances = 6,
+ maxInstancesPerNode = 2,
+ allowLocalRoutees = true,
+ useRoles = roles)).props(Props[SomeActor]),
"router-3")
awaitAssert(currentRoutees(router).size should ===(4))
@@ -196,13 +202,15 @@ abstract class UseRoleIgnoredSpec
runOn(first) {
val roles = Set("b")
- val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
- ClusterRouterGroupSettings(totalInstances = 6,
- routeesPaths =
- List("/user/foo", "/user/bar"),
- allowLocalRoutees = true,
- useRoles = roles)).props,
- "router-3b")
+ val router = system.actorOf(
+ ClusterRouterGroup(
+ RoundRobinGroup(paths = Nil),
+ ClusterRouterGroupSettings(
+ totalInstances = 6,
+ routeesPaths = List("/user/foo", "/user/bar"),
+ allowLocalRoutees = true,
+ useRoles = roles)).props,
+ "router-3b")
awaitAssert(currentRoutees(router).size should ===(4))
@@ -228,11 +236,13 @@ abstract class UseRoleIgnoredSpec
val roles = Set("a")
val router = system.actorOf(
- ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6,
- maxInstancesPerNode = 2,
- allowLocalRoutees = true,
- useRoles = roles)).props(Props[SomeActor]),
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(
+ totalInstances = 6,
+ maxInstancesPerNode = 2,
+ allowLocalRoutees = true,
+ useRoles = roles)).props(Props[SomeActor]),
"router-4")
awaitAssert(currentRoutees(router).size should ===(2))
@@ -258,13 +268,15 @@ abstract class UseRoleIgnoredSpec
runOn(first) {
val roles = Set("a")
- val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
- ClusterRouterGroupSettings(totalInstances = 6,
- routeesPaths =
- List("/user/foo", "/user/bar"),
- allowLocalRoutees = true,
- useRoles = roles)).props,
- "router-4b")
+ val router = system.actorOf(
+ ClusterRouterGroup(
+ RoundRobinGroup(paths = Nil),
+ ClusterRouterGroupSettings(
+ totalInstances = 6,
+ routeesPaths = List("/user/foo", "/user/bar"),
+ allowLocalRoutees = true,
+ useRoles = roles)).props,
+ "router-4b")
awaitAssert(currentRoutees(router).size should ===(2))
@@ -290,11 +302,13 @@ abstract class UseRoleIgnoredSpec
val roles = Set("c")
val router = system.actorOf(
- ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6,
- maxInstancesPerNode = 2,
- allowLocalRoutees = true,
- useRoles = roles)).props(Props[SomeActor]),
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(
+ totalInstances = 6,
+ maxInstancesPerNode = 2,
+ allowLocalRoutees = true,
+ useRoles = roles)).props(Props[SomeActor]),
"router-5")
awaitAssert(currentRoutees(router).size should ===(6))
@@ -320,13 +334,15 @@ abstract class UseRoleIgnoredSpec
runOn(first) {
val roles = Set("c")
- val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
- ClusterRouterGroupSettings(totalInstances = 6,
- routeesPaths =
- List("/user/foo", "/user/bar"),
- allowLocalRoutees = true,
- useRoles = roles)).props,
- "router-5b")
+ val router = system.actorOf(
+ ClusterRouterGroup(
+ RoundRobinGroup(paths = Nil),
+ ClusterRouterGroupSettings(
+ totalInstances = 6,
+ routeesPaths = List("/user/foo", "/user/bar"),
+ allowLocalRoutees = true,
+ useRoles = roles)).props,
+ "router-5b")
awaitAssert(currentRoutees(router).size should ===(6))
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala
index fca05365fd..55665f3480 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala
@@ -57,7 +57,8 @@ class ClusterConfigSpec extends AkkaSpec {
}
"be able to parse non-default cluster config elements" in {
- val settings = new ClusterSettings(ConfigFactory.parseString("""
+ val settings = new ClusterSettings(
+ ConfigFactory.parseString("""
|akka {
| cluster {
| roles = [ "hamlet" ]
@@ -65,7 +66,7 @@ class ClusterConfigSpec extends AkkaSpec {
| }
|}
""".stripMargin).withFallback(ConfigFactory.load()),
- system.name)
+ system.name)
import settings._
Roles should ===(Set("hamlet", ClusterSettings.DcRolePrefix + "blue"))
SelfDataCenter should ===("blue")
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala
index f9fc25ec3f..43f8f937cc 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala
@@ -14,7 +14,8 @@ import akka.cluster.routing.ClusterRouterPoolSettings
import akka.cluster.routing.ClusterRouterGroupSettings
object ClusterDeployerSpec {
- val deployerConf = ConfigFactory.parseString("""
+ val deployerConf = ConfigFactory.parseString(
+ """
akka.actor.provider = "cluster"
akka.actor.deployment {
/user/service1 {
@@ -37,7 +38,7 @@ object ClusterDeployerSpec {
akka.remote.netty.tcp.port = 0
akka.remote.artery.canonical.port = 0
""",
- ConfigParseOptions.defaults)
+ ConfigParseOptions.defaults)
class RecipeActor extends Actor {
def receive = { case _ => }
@@ -55,15 +56,15 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) {
deployment should not be (None)
deployment should ===(
- Some(Deploy(service,
- deployment.get.config,
- ClusterRouterPool(RoundRobinPool(20),
- ClusterRouterPoolSettings(totalInstances = 20,
- maxInstancesPerNode = 3,
- allowLocalRoutees = false)),
- ClusterScope,
- Deploy.NoDispatcherGiven,
- Deploy.NoMailboxGiven)))
+ Some(Deploy(
+ service,
+ deployment.get.config,
+ ClusterRouterPool(
+ RoundRobinPool(20),
+ ClusterRouterPoolSettings(totalInstances = 20, maxInstancesPerNode = 3, allowLocalRoutees = false)),
+ ClusterScope,
+ Deploy.NoDispatcherGiven,
+ Deploy.NoMailboxGiven)))
}
"be able to parse 'akka.actor.deployment._' with specified cluster group" in {
@@ -72,15 +73,18 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) {
deployment should not be (None)
deployment should ===(
- Some(Deploy(service,
- deployment.get.config,
- ClusterRouterGroup(RoundRobinGroup(List("/user/myservice")),
- ClusterRouterGroupSettings(totalInstances = 20,
- routeesPaths = List("/user/myservice"),
- allowLocalRoutees = false)),
- ClusterScope,
- "mydispatcher",
- "mymailbox")))
+ Some(Deploy(
+ service,
+ deployment.get.config,
+ ClusterRouterGroup(
+ RoundRobinGroup(List("/user/myservice")),
+ ClusterRouterGroupSettings(
+ totalInstances = 20,
+ routeesPaths = List("/user/myservice"),
+ allowLocalRoutees = false)),
+ ClusterScope,
+ "mydispatcher",
+ "mymailbox")))
}
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala
index adec870a7c..76cf4439ae 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala
@@ -80,17 +80,18 @@ class ClusterDomainEventPublisherSpec
val state6 = state(g6, aUp.uniqueAddress, DefaultDataCenter)
val g7 = Gossip(members = SortedSet(aExiting, bExiting, cUp)).seen(aUp.uniqueAddress)
val state7 = state(g7, aUp.uniqueAddress, DefaultDataCenter)
- val g8 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp),
- overview =
- GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress)))
+ val g8 = Gossip(
+ members = SortedSet(aUp, bExiting, cUp, dUp),
+ overview = GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress)))
.seen(aUp.uniqueAddress)
val state8 = state(g8, aUp.uniqueAddress, DefaultDataCenter)
- val g9 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp),
- overview =
- GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, eUp.uniqueAddress)))
+ val g9 = Gossip(
+ members = SortedSet(aUp, bExiting, cUp, dUp, eUp),
+ overview = GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, eUp.uniqueAddress)))
val state9 = state(g9, aUp.uniqueAddress, DefaultDataCenter)
- val g10 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp),
- overview = GossipOverview(reachability = Reachability.empty))
+ val g10 = Gossip(
+ members = SortedSet(aUp, bExiting, cUp, dUp, eUp),
+ overview = GossipOverview(reachability = Reachability.empty))
val state10 = state(g10, aUp.uniqueAddress, DefaultDataCenter)
// created in beforeEach
@@ -166,9 +167,9 @@ class ClusterDomainEventPublisherSpec
subscriber.expectMsgType[CurrentClusterState]
publisher ! PublishChanges(
state(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress, DefaultDataCenter))
- subscriber.expectMsgAllOf(RoleLeaderChanged("GRP", Some(dUp.address)),
- RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter,
- Some(dUp.address)))
+ subscriber.expectMsgAllOf(
+ RoleLeaderChanged("GRP", Some(dUp.address)),
+ RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(dUp.address)))
publisher ! PublishChanges(state(Gossip(members = SortedSet(cUp, dUp)), dUp.uniqueAddress, DefaultDataCenter))
subscriber.expectMsg(RoleLeaderChanged("GRP", Some(cUp.address)))
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala
index 64b9e8d98a..49bf0eb751 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala
@@ -99,26 +99,30 @@ class ClusterDomainEventSpec extends WordSpec with Matchers {
val dc3BMemberUp = TestMember(Address("akka.tcp", "sys", "dc3B", 2552), Up, Set.empty[String], "dc3")
val reachability1 = Reachability.empty
- val g1 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp),
- overview = GossipOverview(reachability = reachability1))
+ val g1 = Gossip(
+ members = SortedSet(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp),
+ overview = GossipOverview(reachability = reachability1))
val reachability2 = reachability1
.unreachable(aUp.uniqueAddress, dc2AMemberDown.uniqueAddress)
.unreachable(dc2BMemberUp.uniqueAddress, dc2AMemberDown.uniqueAddress)
- val g2 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberDown, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp),
- overview = GossipOverview(reachability = reachability2))
+ val g2 = Gossip(
+ members = SortedSet(aUp, bUp, dc2AMemberDown, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp),
+ overview = GossipOverview(reachability = reachability2))
Set(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp).foreach { member =>
val otherDc =
if (member.dataCenter == ClusterSettings.DefaultDataCenter) Seq("dc2")
else Seq()
- diffUnreachableDataCenter(MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5),
- MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(
+ diffUnreachableDataCenter(
+ MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5),
+ MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(
otherDc.map(UnreachableDataCenter))
- diffReachableDataCenter(MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5),
- MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(
+ diffReachableDataCenter(
+ MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5),
+ MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(
otherDc.map(ReachableDataCenter))
}
}
@@ -234,19 +238,20 @@ class ClusterDomainEventSpec extends WordSpec with Matchers {
val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining))
diffRolesLeader(state(g0), state(g1)) should ===(
Set(
- // since this role is implicitly added
- RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(aUp.address)),
- RoleLeaderChanged("AA", Some(aUp.address)),
- RoleLeaderChanged("AB", Some(aUp.address)),
- RoleLeaderChanged("BB", Some(bUp.address)),
- RoleLeaderChanged("DD", Some(dLeaving.address)),
- RoleLeaderChanged("DE", Some(dLeaving.address)),
- RoleLeaderChanged("EE", Some(eUp.address))))
+ // since this role is implicitly added
+ RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(aUp.address)),
+ RoleLeaderChanged("AA", Some(aUp.address)),
+ RoleLeaderChanged("AB", Some(aUp.address)),
+ RoleLeaderChanged("BB", Some(bUp.address)),
+ RoleLeaderChanged("DD", Some(dLeaving.address)),
+ RoleLeaderChanged("DE", Some(dLeaving.address)),
+ RoleLeaderChanged("EE", Some(eUp.address))))
diffRolesLeader(state(g1), state(g2)) should ===(
- Set(RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(bUp.address)),
- RoleLeaderChanged("AA", None),
- RoleLeaderChanged("AB", Some(bUp.address)),
- RoleLeaderChanged("DE", Some(eJoining.address))))
+ Set(
+ RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(bUp.address)),
+ RoleLeaderChanged("AA", None),
+ RoleLeaderChanged("AB", Some(bUp.address)),
+ RoleLeaderChanged("DE", Some(eJoining.address))))
}
"not be produced for role leader changes in other data centers" in {
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala
index d40d765e3b..27fd842115 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala
@@ -114,8 +114,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
}
"allow join and leave with local address" in {
- val sys2 = ActorSystem("ClusterSpec2",
- ConfigFactory.parseString("""
+ val sys2 = ActorSystem(
+ "ClusterSpec2",
+ ConfigFactory.parseString("""
akka.actor.provider = "cluster"
akka.remote.netty.tcp.port = 0
akka.remote.artery.canonical.port = 0
@@ -149,8 +150,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
}
"leave via CoordinatedShutdown.run" in {
- val sys2 = ActorSystem("ClusterSpec2",
- ConfigFactory.parseString("""
+ val sys2 = ActorSystem(
+ "ClusterSpec2",
+ ConfigFactory.parseString("""
akka.actor.provider = "cluster"
akka.remote.netty.tcp.port = 0
akka.remote.artery.canonical.port = 0
@@ -178,8 +180,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
}
"terminate ActorSystem via CoordinatedShutdown.run when a stream involving StreamRefs is running" in {
- val sys2 = ActorSystem("ClusterSpec2",
- ConfigFactory.parseString("""
+ val sys2 = ActorSystem(
+ "ClusterSpec2",
+ ConfigFactory.parseString("""
akka.actor.provider = "cluster"
akka.remote.netty.tcp.port = 0
akka.remote.artery.canonical.port = 0
@@ -215,8 +218,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
}
"leave via CoordinatedShutdown.run when member status is Joining" in {
- val sys2 = ActorSystem("ClusterSpec2",
- ConfigFactory.parseString("""
+ val sys2 = ActorSystem(
+ "ClusterSpec2",
+ ConfigFactory.parseString("""
akka.actor.provider = "cluster"
akka.remote.netty.tcp.port = 0
akka.remote.artery.canonical.port = 0
@@ -245,8 +249,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
}
"terminate ActorSystem via leave (CoordinatedShutdown)" in {
- val sys2 = ActorSystem("ClusterSpec2",
- ConfigFactory.parseString("""
+ val sys2 = ActorSystem(
+ "ClusterSpec2",
+ ConfigFactory.parseString("""
akka.actor.provider = "cluster"
akka.remote.netty.tcp.port = 0
akka.remote.artery.canonical.port = 0
@@ -278,8 +283,9 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender {
}
"terminate ActorSystem via down (CoordinatedShutdown)" in {
- val sys3 = ActorSystem("ClusterSpec3",
- ConfigFactory.parseString("""
+ val sys3 = ActorSystem(
+ "ClusterSpec3",
+ ConfigFactory.parseString("""
akka.actor.provider = "cluster"
akka.remote.netty.tcp.port = 0
akka.remote.artery.canonical.port = 0
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala
index 6bcc958306..880351be2d 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala
@@ -116,9 +116,10 @@ trait ClusterTestKit extends TestKitBase {
// remove old before starting the new one
cluster.leave(cluster.readView.selfAddress)
- awaitCond(cluster.readView.status == Removed,
- message =
- s"awaiting node [${cluster.readView.selfAddress}] to be 'Removed'. Current status: [${cluster.readView.status}]")
+ awaitCond(
+ cluster.readView.status == Removed,
+ message =
+ s"awaiting node [${cluster.readView.selfAddress}] to be 'Removed'. Current status: [${cluster.readView.status}]")
shutdown(actorSystem)
awaitCond(cluster.isTerminated)
@@ -182,13 +183,14 @@ abstract class RollingUpgradeClusterSpec(config: Config) extends AkkaSpec(config
* @param enforced toggle `akka.cluster.configuration-compatibility-check.enforce-on-join` on or off
* @param shouldRejoin the condition being tested on attempted re-join: members up or terminated
*/
- def upgradeCluster(clusterSize: Int,
- baseConfig: Config,
- upgradeConfig: Config,
- timeout: FiniteDuration,
- awaitAll: FiniteDuration,
- enforced: Boolean,
- shouldRejoin: Boolean): Unit = {
+ def upgradeCluster(
+ clusterSize: Int,
+ baseConfig: Config,
+ upgradeConfig: Config,
+ timeout: FiniteDuration,
+ awaitAll: FiniteDuration,
+ enforced: Boolean,
+ shouldRejoin: Boolean): Unit = {
require(clusterSize > 1, s"'clusterSize' must be > 1 but was $clusterSize")
val util = new ClusterTestUtil(system.name)
diff --git a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala
index 5c753cb464..5e337e0bc2 100644
--- a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala
@@ -55,8 +55,9 @@ class DowningProviderSpec extends WordSpec with Matchers {
}
"use akka.cluster.AutoDowning if 'auto-down-unreachable-after' is configured" in {
- val system = ActorSystem("auto-downing",
- ConfigFactory.parseString("""
+ val system = ActorSystem(
+ "auto-downing",
+ ConfigFactory.parseString("""
akka.cluster.auto-down-unreachable-after = 18d
""").withFallback(baseConf))
Cluster(system).downingProvider shouldBe an[AutoDowning]
@@ -64,12 +65,11 @@ class DowningProviderSpec extends WordSpec with Matchers {
}
"use the specified downing provider" in {
- val system = ActorSystem("auto-downing",
- ConfigFactory
- .parseString("""
+ val system = ActorSystem(
+ "auto-downing",
+ ConfigFactory.parseString("""
akka.cluster.downing-provider-class="akka.cluster.DummyDowningProvider"
- """)
- .withFallback(baseConf))
+ """).withFallback(baseConf))
Cluster(system).downingProvider shouldBe a[DummyDowningProvider]
awaitCond(Cluster(system).downingProvider.asInstanceOf[DummyDowningProvider].actorPropsAccessed.get(), 3.seconds)
@@ -77,12 +77,11 @@ class DowningProviderSpec extends WordSpec with Matchers {
}
"stop the cluster if the downing provider throws exception in props method" in {
- val system = ActorSystem("auto-downing",
- ConfigFactory
- .parseString("""
+ val system = ActorSystem(
+ "auto-downing",
+ ConfigFactory.parseString("""
akka.cluster.downing-provider-class="akka.cluster.FailingDowningProvider"
- """)
- .withFallback(baseConf))
+ """).withFallback(baseConf))
val cluster = Cluster(system)
cluster.join(cluster.selfAddress)
diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala
index f91d9b93eb..42b6b9ff21 100644
--- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala
@@ -211,15 +211,16 @@ class GossipSpec extends WordSpec with Matchers {
"know who is youngest" in {
// a2 and e1 is Joining
val g1 =
- Gossip(members = SortedSet(a2, b1.copyUp(3), e1),
- overview =
- GossipOverview(reachability = Reachability.empty.unreachable(a2.uniqueAddress, e1.uniqueAddress)))
+ Gossip(
+ members = SortedSet(a2, b1.copyUp(3), e1),
+ overview = GossipOverview(reachability = Reachability.empty.unreachable(a2.uniqueAddress, e1.uniqueAddress)))
state(g1).youngestMember should ===(b1)
- val g2 = Gossip(members = SortedSet(a2, b1.copyUp(3), e1),
- overview = GossipOverview(
- reachability = Reachability.empty
- .unreachable(a2.uniqueAddress, b1.uniqueAddress)
- .unreachable(a2.uniqueAddress, e1.uniqueAddress)))
+ val g2 = Gossip(
+ members = SortedSet(a2, b1.copyUp(3), e1),
+ overview = GossipOverview(
+ reachability = Reachability.empty
+ .unreachable(a2.uniqueAddress, b1.uniqueAddress)
+ .unreachable(a2.uniqueAddress, e1.uniqueAddress)))
state(g2).youngestMember should ===(b1)
val g3 = Gossip(members = SortedSet(a2, b1.copyUp(3), e2.copyUp(4)))
state(g3).youngestMember should ===(e2)
@@ -352,11 +353,12 @@ class GossipSpec extends WordSpec with Matchers {
// TODO test coverage for when leaderOf returns None - I have not been able to figure it out
"clear out a bunch of stuff when removing a node" in {
- val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2d2),
- overview = GossipOverview(
- reachability = Reachability.empty
- .unreachable(dc1b1.uniqueAddress, dc2d2.uniqueAddress)
- .unreachable(dc2d2.uniqueAddress, dc1b1.uniqueAddress)))
+ val g = Gossip(
+ members = SortedSet(dc1a1, dc1b1, dc2d2),
+ overview = GossipOverview(
+ reachability = Reachability.empty
+ .unreachable(dc1b1.uniqueAddress, dc2d2.uniqueAddress)
+ .unreachable(dc2d2.uniqueAddress, dc1b1.uniqueAddress)))
.:+(VectorClock.Node(Gossip.vclockName(dc1b1.uniqueAddress)))
.:+(VectorClock.Node(Gossip.vclockName(dc2d2.uniqueAddress)))
.remove(dc1b1.uniqueAddress, System.currentTimeMillis())
diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala
index 237e34d98c..0c7995c307 100644
--- a/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala
@@ -97,8 +97,9 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers {
}
val state = MembershipState(
- Gossip(members = SortedSet(aDc1, bDc1, cDc1),
- overview = GossipOverview(reachability = Reachability.empty.unreachable(aDc1, bDc1))),
+ Gossip(
+ members = SortedSet(aDc1, bDc1, cDc1),
+ overview = GossipOverview(reachability = Reachability.empty.unreachable(aDc1, bDc1))),
aDc1,
aDc1.dataCenter,
crossDcConnections = 5)
@@ -114,9 +115,9 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers {
}
val state = MembershipState(
- Gossip(members = SortedSet(aDc1, bDc1, cDc1),
- overview =
- GossipOverview(reachability = Reachability.empty.unreachable(aDc1, bDc1).unreachable(bDc1, cDc1))),
+ Gossip(
+ members = SortedSet(aDc1, bDc1, cDc1),
+ overview = GossipOverview(reachability = Reachability.empty.unreachable(aDc1, bDc1).unreachable(bDc1, cDc1))),
aDc1,
aDc1.dataCenter,
crossDcConnections = 5)
@@ -133,13 +134,13 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers {
override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name
}
- val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3),
- overview = GossipOverview(
- reachability =
- Reachability.empty.unreachable(aDc1, eDc2).unreachable(aDc1, fDc2))),
- aDc1,
- aDc1.dataCenter,
- crossDcConnections = 5)
+ val state = MembershipState(
+ Gossip(
+ members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3),
+ overview = GossipOverview(reachability = Reachability.empty.unreachable(aDc1, eDc2).unreachable(aDc1, fDc2))),
+ aDc1,
+ aDc1.dataCenter,
+ crossDcConnections = 5)
val gossipTo = selector.gossipTargets(state)
gossipTo should ===(Vector[UniqueAddress](gDc3, hDc3))
}
@@ -150,10 +151,11 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers {
override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name
}
- val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)).seen(fDc2).seen(hDc3),
- aDc1,
- aDc1.dataCenter,
- crossDcConnections = 5)
+ val state = MembershipState(
+ Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)).seen(fDc2).seen(hDc3),
+ aDc1,
+ aDc1.dataCenter,
+ crossDcConnections = 5)
val gossipTo = selector.gossipTargets(state)
gossipTo should ===(Vector[UniqueAddress](eDc2, fDc2))
}
@@ -164,19 +166,21 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers {
override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name
}
- val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)),
- aDc1,
- aDc1.dataCenter,
- crossDcConnections = 1)
+ val state = MembershipState(
+ Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)),
+ aDc1,
+ aDc1.dataCenter,
+ crossDcConnections = 1)
val gossipTo = selector.gossipTargets(state)
gossipTo should ===(Vector[UniqueAddress](eDc2))
}
"select N random local nodes when single dc" in {
- val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, cDc1)),
- aDc1,
- aDc1.dataCenter,
- crossDcConnections = 1) // means only a e and g are oldest
+ val state = MembershipState(
+ Gossip(members = SortedSet(aDc1, bDc1, cDc1)),
+ aDc1,
+ aDc1.dataCenter,
+ crossDcConnections = 1) // means only a e and g are oldest
val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3)
@@ -184,10 +188,11 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers {
}
"select N random local nodes when not self among oldest" in {
- val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2, gDc3, hDc3)),
- bDc1,
- bDc1.dataCenter,
- crossDcConnections = 1) // means only a, e and g are oldest
+ val state = MembershipState(
+ Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2, gDc3, hDc3)),
+ bDc1,
+ bDc1.dataCenter,
+ crossDcConnections = 1) // means only a, e and g are oldest
val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3)
@@ -195,10 +200,11 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers {
}
"select N-1 random local nodes plus one cross dc oldest node when self among oldest" in {
- val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2)),
- aDc1,
- aDc1.dataCenter,
- crossDcConnections = 1) // means only a and e are oldest
+ val state = MembershipState(
+ Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2)),
+ aDc1,
+ aDc1.dataCenter,
+ crossDcConnections = 1) // means only a and e are oldest
val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3)
diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala
index 8c821e7820..1fce5bb232 100644
--- a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala
@@ -14,15 +14,17 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers {
// Test for some of the pre-build helpers we offer
"JoinConfigCompatChecker.exists" must {
- val requiredKeys = im.Seq("akka.cluster.min-nr-of-members",
- "akka.cluster.retry-unsuccessful-join-after",
- "akka.cluster.allow-weakly-up-members")
+ val requiredKeys = im.Seq(
+ "akka.cluster.min-nr-of-members",
+ "akka.cluster.retry-unsuccessful-join-after",
+ "akka.cluster.allow-weakly-up-members")
"pass when all required keys are provided" in {
val result =
- JoinConfigCompatChecker.exists(requiredKeys,
- config("""
+ JoinConfigCompatChecker.exists(
+ requiredKeys,
+ config("""
|{
| akka.cluster.min-nr-of-members = 1
| akka.cluster.retry-unsuccessful-join-after = 10s
@@ -36,8 +38,9 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers {
"fail when some required keys are NOT provided" in {
val Invalid(incompatibleKeys) =
- JoinConfigCompatChecker.exists(requiredKeys,
- config("""
+ JoinConfigCompatChecker.exists(
+ requiredKeys,
+ config("""
|{
| akka.cluster.min-nr-of-members = 1
|}
@@ -51,9 +54,10 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers {
"JoinConfigCompatChecker.fullMatch" must {
- val requiredKeys = im.Seq("akka.cluster.min-nr-of-members",
- "akka.cluster.retry-unsuccessful-join-after",
- "akka.cluster.allow-weakly-up-members")
+ val requiredKeys = im.Seq(
+ "akka.cluster.min-nr-of-members",
+ "akka.cluster.retry-unsuccessful-join-after",
+ "akka.cluster.allow-weakly-up-members")
val clusterConfig =
config("""
@@ -67,15 +71,16 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers {
"pass when all required keys are provided and all match cluster config" in {
val result =
- JoinConfigCompatChecker.fullMatch(requiredKeys,
- config("""
+ JoinConfigCompatChecker.fullMatch(
+ requiredKeys,
+ config("""
|{
| akka.cluster.min-nr-of-members = 1
| akka.cluster.retry-unsuccessful-join-after = 10s
| akka.cluster.allow-weakly-up-members = on
|}
""".stripMargin),
- clusterConfig)
+ clusterConfig)
result shouldBe Valid
}
@@ -83,13 +88,14 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers {
"fail when some required keys are NOT provided" in {
val Invalid(incompatibleKeys) =
- JoinConfigCompatChecker.fullMatch(requiredKeys,
- config("""
+ JoinConfigCompatChecker.fullMatch(
+ requiredKeys,
+ config("""
|{
| akka.cluster.min-nr-of-members = 1
|}
""".stripMargin),
- clusterConfig)
+ clusterConfig)
incompatibleKeys should have size 2
incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing")
@@ -99,15 +105,16 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers {
"fail when all required keys are passed, but some values don't match cluster config" in {
val Invalid(incompatibleKeys) =
- JoinConfigCompatChecker.fullMatch(requiredKeys,
- config("""
+ JoinConfigCompatChecker.fullMatch(
+ requiredKeys,
+ config("""
|{
| akka.cluster.min-nr-of-members = 1
| akka.cluster.retry-unsuccessful-join-after = 15s
| akka.cluster.allow-weakly-up-members = off
|}
""".stripMargin),
- clusterConfig)
+ clusterConfig)
incompatibleKeys should have size 2
incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is incompatible")
@@ -117,14 +124,15 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers {
"fail when all required keys are passed, but some are missing and others don't match cluster config" in {
val Invalid(incompatibleKeys) =
- JoinConfigCompatChecker.fullMatch(requiredKeys,
- config("""
+ JoinConfigCompatChecker.fullMatch(
+ requiredKeys,
+ config("""
|{
| akka.cluster.min-nr-of-members = 1
| akka.cluster.allow-weakly-up-members = off
|}
""".stripMargin),
- clusterConfig)
+ clusterConfig)
incompatibleKeys should have size 2
incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing")
diff --git a/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala
index 2e105416d5..6ae5ffe0a0 100644
--- a/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala
@@ -51,41 +51,48 @@ class MembershipStateSpec extends WordSpec with Matchers {
}
"find two oldest per role as targets for Exiting change" in {
- val a5 = TestMember(Address("akka.tcp", "sys", "a5", 2552),
- MemberStatus.Exiting,
- roles = Set("role1", "role2"),
- upNumber = 5,
- dataCenter = "dc-a")
- val a6 = TestMember(Address("akka.tcp", "sys", "a6", 2552),
- MemberStatus.Exiting,
- roles = Set("role1", "role3"),
- upNumber = 6,
- dataCenter = "dc-a")
- val a7 = TestMember(Address("akka.tcp", "sys", "a7", 2552),
- MemberStatus.Exiting,
- roles = Set("role1"),
- upNumber = 7,
- dataCenter = "dc-a")
- val a8 = TestMember(Address("akka.tcp", "sys", "a8", 2552),
- MemberStatus.Exiting,
- roles = Set("role1"),
- upNumber = 8,
- dataCenter = "dc-a")
- val a9 = TestMember(Address("akka.tcp", "sys", "a9", 2552),
- MemberStatus.Exiting,
- roles = Set("role2"),
- upNumber = 9,
- dataCenter = "dc-a")
- val b5 = TestMember(Address("akka.tcp", "sys", "b5", 2552),
- MemberStatus.Exiting,
- roles = Set("role1"),
- upNumber = 5,
- dataCenter = "dc-b")
- val b6 = TestMember(Address("akka.tcp", "sys", "b6", 2552),
- MemberStatus.Exiting,
- roles = Set("role2"),
- upNumber = 6,
- dataCenter = "dc-b")
+ val a5 = TestMember(
+ Address("akka.tcp", "sys", "a5", 2552),
+ MemberStatus.Exiting,
+ roles = Set("role1", "role2"),
+ upNumber = 5,
+ dataCenter = "dc-a")
+ val a6 = TestMember(
+ Address("akka.tcp", "sys", "a6", 2552),
+ MemberStatus.Exiting,
+ roles = Set("role1", "role3"),
+ upNumber = 6,
+ dataCenter = "dc-a")
+ val a7 = TestMember(
+ Address("akka.tcp", "sys", "a7", 2552),
+ MemberStatus.Exiting,
+ roles = Set("role1"),
+ upNumber = 7,
+ dataCenter = "dc-a")
+ val a8 = TestMember(
+ Address("akka.tcp", "sys", "a8", 2552),
+ MemberStatus.Exiting,
+ roles = Set("role1"),
+ upNumber = 8,
+ dataCenter = "dc-a")
+ val a9 = TestMember(
+ Address("akka.tcp", "sys", "a9", 2552),
+ MemberStatus.Exiting,
+ roles = Set("role2"),
+ upNumber = 9,
+ dataCenter = "dc-a")
+ val b5 = TestMember(
+ Address("akka.tcp", "sys", "b5", 2552),
+ MemberStatus.Exiting,
+ roles = Set("role1"),
+ upNumber = 5,
+ dataCenter = "dc-b")
+ val b6 = TestMember(
+ Address("akka.tcp", "sys", "b6", 2552),
+ MemberStatus.Exiting,
+ roles = Set("role2"),
+ upNumber = 6,
+ dataCenter = "dc-b")
val theExiting = Set(a5, a6)
val gossip = Gossip(SortedSet(a1, a2, a3, a4, a5, a6, a7, a8, a9, b1, b2, b3, b5, b6))
val membershipState = MembershipState(gossip, a1.uniqueAddress, "dc-a", 2)
diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala
index 6462030529..d20b87dfd9 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala
@@ -40,10 +40,11 @@ class ReachabilityPerfSpec extends WordSpec with Matchers {
val reachability3 = addUnreachable(reachability1, nodesSize / 2)
val allowed = reachability1.versions.keySet
- private def checkThunkFor(r1: Reachability,
- r2: Reachability,
- thunk: (Reachability, Reachability) => Unit,
- times: Int): Unit = {
+ private def checkThunkFor(
+ r1: Reachability,
+ r2: Reachability,
+ thunk: (Reachability, Reachability) => Unit,
+ times: Int): Unit = {
for (i <- 1 to times) {
thunk(Reachability(r1.records, r1.versions), Reachability(r2.records, r2.versions))
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala
index 559f51ba98..7271bae31f 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala
@@ -95,16 +95,18 @@ class ReachabilitySpec extends WordSpec with Matchers {
val r2 = r.unreachable(nodeB, nodeD).unreachable(nodeB, nodeE)
r2.records.toSet should ===(
- Set(Record(nodeD, nodeC, Unreachable, 1L),
- Record(nodeB, nodeD, Unreachable, 5L),
- Record(nodeB, nodeE, Unreachable, 6L)))
+ Set(
+ Record(nodeD, nodeC, Unreachable, 1L),
+ Record(nodeB, nodeD, Unreachable, 5L),
+ Record(nodeB, nodeE, Unreachable, 6L)))
}
"have correct aggregated status" in {
- val records = Vector(Reachability.Record(nodeA, nodeB, Reachable, 2),
- Reachability.Record(nodeC, nodeB, Unreachable, 2),
- Reachability.Record(nodeA, nodeD, Unreachable, 3),
- Reachability.Record(nodeD, nodeB, Terminated, 4))
+ val records = Vector(
+ Reachability.Record(nodeA, nodeB, Reachable, 2),
+ Reachability.Record(nodeC, nodeB, Unreachable, 2),
+ Reachability.Record(nodeA, nodeD, Unreachable, 3),
+ Reachability.Record(nodeD, nodeB, Terminated, 4))
val versions = Map(nodeA -> 3L, nodeC -> 3L, nodeD -> 4L)
val r = Reachability(records, versions)
r.status(nodeA) should ===(Reachable)
diff --git a/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala
index 45c21b20e0..1c6755fc19 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala
@@ -59,9 +59,9 @@ class ResetSystemMessageSeqNrSpec extends ArteryMultiNodeSpec("""
expectTerminated(echo1)
shutdown(sys2)
- val sys3 = newRemoteSystem(name = Some(system.name),
- extraConfig =
- Some(s"akka.remote.artery.canonical.port=${Cluster(sys2).selfAddress.port.get}"))
+ val sys3 = newRemoteSystem(
+ name = Some(system.name),
+ extraConfig = Some(s"akka.remote.artery.canonical.port=${Cluster(sys2).selfAddress.port.get}"))
Cluster(sys3).join(Cluster(system).selfAddress)
within(10.seconds) {
awaitAssert {
diff --git a/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala
index e8bdd6e048..116f29fbf6 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala
@@ -49,8 +49,9 @@ class ShutdownAfterJoinSeedNodesSpec extends AkkaSpec(ShutdownAfterJoinSeedNodes
Cluster(oridinary1).joinSeedNodes(seedNodes)
Await.result(seed2.whenTerminated, Cluster(seed2).settings.ShutdownAfterUnsuccessfulJoinSeedNodes + 10.second)
- Await.result(oridinary1.whenTerminated,
- Cluster(seed2).settings.ShutdownAfterUnsuccessfulJoinSeedNodes + 10.second)
+ Await.result(
+ oridinary1.whenTerminated,
+ Cluster(seed2).settings.ShutdownAfterUnsuccessfulJoinSeedNodes + 10.second)
}
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala
index 4e50e2a568..3551be1138 100644
--- a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala
@@ -13,17 +13,19 @@ object TestMember {
def apply(address: Address, status: MemberStatus, upNumber: Int, dc: ClusterSettings.DataCenter): Member =
apply(address, status, Set.empty, dc, upNumber)
- def apply(address: Address,
- status: MemberStatus,
- roles: Set[String],
- dataCenter: ClusterSettings.DataCenter = ClusterSettings.DefaultDataCenter,
- upNumber: Int = Int.MaxValue): Member =
+ def apply(
+ address: Address,
+ status: MemberStatus,
+ roles: Set[String],
+ dataCenter: ClusterSettings.DataCenter = ClusterSettings.DefaultDataCenter,
+ upNumber: Int = Int.MaxValue): Member =
withUniqueAddress(UniqueAddress(address, 0L), status, roles, dataCenter, upNumber)
- def withUniqueAddress(uniqueAddress: UniqueAddress,
- status: MemberStatus,
- roles: Set[String],
- dataCenter: ClusterSettings.DataCenter,
- upNumber: Int = Int.MaxValue): Member =
+ def withUniqueAddress(
+ uniqueAddress: UniqueAddress,
+ status: MemberStatus,
+ roles: Set[String],
+ dataCenter: ClusterSettings.DataCenter,
+ upNumber: Int = Int.MaxValue): Member =
new Member(uniqueAddress, upNumber, status, roles + (ClusterSettings.DcRolePrefix + dataCenter))
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
index c9f346bea7..3783a24708 100644
--- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
@@ -101,8 +101,9 @@ class ClusterMessageSerializerSpec extends AkkaSpec("akka.actor.provider = clust
"deserialize from wire format of version 2.5.9 (using serialized address for InitJoinAck)" in {
// we must use the old singleton class name so that the other side will see an InitJoin
// but discard the config as it does not know about the config check
- val initJoinAck = InternalClusterAction.InitJoinAck(Address("akka.tcp", "cluster", "127.0.0.1", 2552),
- InternalClusterAction.UncheckedConfig)
+ val initJoinAck = InternalClusterAction.InitJoinAck(
+ Address("akka.tcp", "cluster", "127.0.0.1", 2552),
+ InternalClusterAction.UncheckedConfig)
val serializedInitJoinAckPre2510 = serializer.addressToProto(initJoinAck.address).build().toByteArray
val deserialized =
@@ -111,8 +112,9 @@ class ClusterMessageSerializerSpec extends AkkaSpec("akka.actor.provider = clust
}
"serialize to wire format of version 2.5.9 (using serialized address for InitJoinAck)" in {
- val initJoinAck = InternalClusterAction.InitJoinAck(Address("akka.tcp", "cluster", "127.0.0.1", 2552),
- InternalClusterAction.ConfigCheckUnsupportedByJoiningNode)
+ val initJoinAck = InternalClusterAction.InitJoinAck(
+ Address("akka.tcp", "cluster", "127.0.0.1", 2552),
+ InternalClusterAction.ConfigCheckUnsupportedByJoiningNode)
val bytes = serializer.toBinary(initJoinAck)
val expectedSerializedInitJoinAckPre2510 = serializer.addressToProto(initJoinAck.address).build().toByteArray
@@ -175,21 +177,24 @@ class ClusterMessageSerializerSpec extends AkkaSpec("akka.actor.provider = clust
"be serializable with one role" in {
checkSerialization(
- ClusterRouterPool(RoundRobinPool(nrOfInstances = 4),
- ClusterRouterPoolSettings(totalInstances = 2,
- maxInstancesPerNode = 5,
- allowLocalRoutees = true,
- useRoles = Set("Richard, Duke of Gloucester"))))
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 4),
+ ClusterRouterPoolSettings(
+ totalInstances = 2,
+ maxInstancesPerNode = 5,
+ allowLocalRoutees = true,
+ useRoles = Set("Richard, Duke of Gloucester"))))
}
"be serializable with many roles" in {
checkSerialization(
- ClusterRouterPool(RoundRobinPool(nrOfInstances = 4),
- ClusterRouterPoolSettings(
- totalInstances = 2,
- maxInstancesPerNode = 5,
- allowLocalRoutees = true,
- useRoles = Set("Richard, Duke of Gloucester", "Hongzhi Emperor", "Red Rackham"))))
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 4),
+ ClusterRouterPoolSettings(
+ totalInstances = 2,
+ maxInstancesPerNode = 5,
+ allowLocalRoutees = true,
+ useRoles = Set("Richard, Duke of Gloucester", "Hongzhi Emperor", "Red Rackham"))))
}
}
diff --git a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala
index 50a9acdef6..694bb463a8 100644
--- a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala
+++ b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala
@@ -41,21 +41,23 @@ object CircuitBreakerProxy {
* @param failureMap function to map a failure into a response message. The failing response message is wrapped
* into a [[akka.contrib.circuitbreaker.CircuitBreakerProxy.CircuitOpenFailure]] object
*/
- def props(target: ActorRef,
- maxFailures: Int,
- callTimeout: Timeout,
- resetTimeout: Timeout,
- circuitEventListener: Option[ActorRef],
- failureDetector: Any => Boolean,
- failureMap: CircuitOpenFailure => Any) =
+ def props(
+ target: ActorRef,
+ maxFailures: Int,
+ callTimeout: Timeout,
+ resetTimeout: Timeout,
+ circuitEventListener: Option[ActorRef],
+ failureDetector: Any => Boolean,
+ failureMap: CircuitOpenFailure => Any) =
Props(
- new CircuitBreakerProxy(target,
- maxFailures,
- callTimeout,
- resetTimeout,
- circuitEventListener,
- failureDetector,
- failureMap))
+ new CircuitBreakerProxy(
+ target,
+ maxFailures,
+ callTimeout,
+ resetTimeout,
+ circuitEventListener,
+ failureDetector,
+ failureMap))
sealed trait CircuitBreakerCommand
@@ -77,14 +79,15 @@ object CircuitBreakerProxy {
final case class CircuitBreakerStateData(failureCount: Int = 0, firstHalfOpenMessageSent: Boolean = false)
- final case class CircuitBreakerPropsBuilder(maxFailures: Int,
- callTimeout: Timeout,
- resetTimeout: Timeout,
- circuitEventListener: Option[ActorRef] = None,
- failureDetector: Any => Boolean = { _ =>
- false
- },
- openCircuitFailureConverter: CircuitOpenFailure => Any = identity) {
+ final case class CircuitBreakerPropsBuilder(
+ maxFailures: Int,
+ callTimeout: Timeout,
+ resetTimeout: Timeout,
+ circuitEventListener: Option[ActorRef] = None,
+ failureDetector: Any => Boolean = { _ =>
+ false
+ },
+ openCircuitFailureConverter: CircuitOpenFailure => Any = identity) {
def withMaxFailures(value: Int) = copy(maxFailures = value)
def withCallTimeout(value: Timeout) = copy(callTimeout = value)
@@ -99,13 +102,14 @@ object CircuitBreakerProxy {
* @param target the target actor ref
*/
def props(target: ActorRef) =
- CircuitBreakerProxy.props(target,
- maxFailures,
- callTimeout,
- resetTimeout,
- circuitEventListener,
- failureDetector,
- openCircuitFailureConverter)
+ CircuitBreakerProxy.props(
+ target,
+ maxFailures,
+ callTimeout,
+ resetTimeout,
+ circuitEventListener,
+ failureDetector,
+ openCircuitFailureConverter)
}
@@ -119,13 +123,14 @@ object CircuitBreakerProxy {
import akka.contrib.circuitbreaker.CircuitBreakerProxy._
@deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0")
-final class CircuitBreakerProxy(target: ActorRef,
- maxFailures: Int,
- callTimeout: Timeout,
- resetTimeout: Timeout,
- circuitEventListener: Option[ActorRef],
- failureDetector: Any => Boolean,
- failureMap: CircuitOpenFailure => Any)
+final class CircuitBreakerProxy(
+ target: ActorRef,
+ maxFailures: Int,
+ callTimeout: Timeout,
+ resetTimeout: Timeout,
+ circuitEventListener: Option[ActorRef],
+ failureDetector: Any => Boolean,
+ failureMap: CircuitOpenFailure => Any)
extends Actor
with ActorLogging
with FSM[CircuitBreakerState, CircuitBreakerStateData] {
@@ -199,16 +204,18 @@ final class CircuitBreakerProxy(target: ActorRef,
stay
case Event(openNotification @ CircuitOpenFailure(_), _) =>
- log.warning("Unexpected circuit open notification {} sent to myself. Please report this as a bug.",
- openNotification)
+ log.warning(
+ "Unexpected circuit open notification {} sent to myself. Please report this as a bug.",
+ openNotification)
stay
case Event(message, state) =>
val failureNotification = failureMap(CircuitOpenFailure(message))
- log.debug("OPEN: Failing request for message {}, sending failure notification {} to sender {}",
- message,
- failureNotification,
- sender)
+ log.debug(
+ "OPEN: Failing request for message {}, sending failure notification {} to sender {}",
+ message,
+ failureNotification,
+ sender)
sender ! failureNotification
stay
@@ -237,10 +244,11 @@ final class CircuitBreakerProxy(target: ActorRef,
case Event(message, CircuitBreakerStateData(_, true)) =>
val failureNotification = failureMap(CircuitOpenFailure(message))
- log.debug("HALF-OPEN: Failing request for message {}, sending failure notification {} to sender {}",
- message,
- failureNotification,
- sender)
+ log.debug(
+ "HALF-OPEN: Failing request for message {}, sending failure notification {} to sender {}",
+ message,
+ failureNotification,
+ sender)
sender ! failureNotification
stay
}
@@ -251,9 +259,11 @@ final class CircuitBreakerProxy(target: ActorRef,
     target.ask(message)(callTimeout).onComplete {
       case Success(response) =>
-        log.debug("Request '{}' has been replied to with response {}, forwarding to original sender {}",
-                  message,
-                  currentSender)
+        log.debug(
+          "Request '{}' has been replied to with response {}, forwarding to original sender {}",
+          message,
+          response,
+          currentSender)
currentSender ! response
diff --git a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala
index c6ef612b9c..7a6795cc87 100644
--- a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala
+++ b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala
@@ -49,8 +49,9 @@ object Implicits {
*/
@throws[akka.contrib.circuitbreaker.OpenCircuitException](
"if the call failed because the circuit breaker proxy state was OPEN")
- def askWithCircuitBreaker(circuitBreakerProxy: ActorRef, message: Any)(implicit executionContext: ExecutionContext,
- timeout: Timeout): Future[Any] =
+ def askWithCircuitBreaker(circuitBreakerProxy: ActorRef, message: Any)(
+ implicit executionContext: ExecutionContext,
+ timeout: Timeout): Future[Any] =
circuitBreakerProxy.internalAskWithCircuitBreaker(message, timeout, ActorRef.noSender)
/**
@@ -89,9 +90,10 @@ final class CircuitBreakerAwareFuture(val future: Future[Any]) extends AnyVal {
@deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0")
final class AskeableWithCircuitBreakerActor(val actorRef: ActorRef) extends AnyVal {
- def askWithCircuitBreaker(message: Any)(implicit executionContext: ExecutionContext,
- timeout: Timeout,
- sender: ActorRef = Actor.noSender): Future[Any] =
+ def askWithCircuitBreaker(message: Any)(
+ implicit executionContext: ExecutionContext,
+ timeout: Timeout,
+ sender: ActorRef = Actor.noSender): Future[Any] =
internalAskWithCircuitBreaker(message, timeout, sender)
@throws[OpenCircuitException]
@@ -106,9 +108,10 @@ final class AskeableWithCircuitBreakerActor(val actorRef: ActorRef) extends AnyV
@deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0")
final class AskeableWithCircuitBreakerActorSelection(val actorSelection: ActorSelection) extends AnyVal {
- def askWithCircuitBreaker(message: Any)(implicit executionContext: ExecutionContext,
- timeout: Timeout,
- sender: ActorRef = Actor.noSender): Future[Any] =
+ def askWithCircuitBreaker(message: Any)(
+ implicit executionContext: ExecutionContext,
+ timeout: Timeout,
+ sender: ActorRef = Actor.noSender): Future[Any] =
internalAskWithCircuitBreaker(message, timeout, sender)
private[circuitbreaker] def internalAskWithCircuitBreaker(message: Any, timeout: Timeout, sender: ActorRef)(
diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala
index 17f7430fb2..fe3b5c4284 100644
--- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala
+++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala
@@ -17,10 +17,11 @@ object ReliableProxy {
* Scala API Props. Arguments are detailed in the [[akka.contrib.pattern.ReliableProxy]]
* constructor.
*/
- def props(targetPath: ActorPath,
- retryAfter: FiniteDuration,
- reconnectAfter: Option[FiniteDuration],
- maxReconnects: Option[Int]): Props = {
+ def props(
+ targetPath: ActorPath,
+ retryAfter: FiniteDuration,
+ reconnectAfter: Option[FiniteDuration],
+ maxReconnects: Option[Int]): Props = {
Props(new ReliableProxy(targetPath, retryAfter, reconnectAfter, maxReconnects))
}
@@ -28,10 +29,11 @@ object ReliableProxy {
* Java API Props. Arguments are detailed in the [[akka.contrib.pattern.ReliableProxy]]
* constructor.
*/
- def props(targetPath: ActorPath,
- retryAfter: FiniteDuration,
- reconnectAfter: FiniteDuration,
- maxReconnects: Int): Props = {
+ def props(
+ targetPath: ActorPath,
+ retryAfter: FiniteDuration,
+ reconnectAfter: FiniteDuration,
+ maxReconnects: Int): Props = {
props(targetPath, retryAfter, Option(reconnectAfter), if (maxReconnects > 0) Some(maxReconnects) else None)
}
@@ -231,10 +233,11 @@ import ReliableProxy._
* target actor. Use `None` for no limit. If `reconnectAfter` is `None` this value is ignored.
*/
@deprecated("Use AtLeastOnceDelivery instead", "2.5.0")
-class ReliableProxy(targetPath: ActorPath,
- retryAfter: FiniteDuration,
- reconnectAfter: Option[FiniteDuration],
- maxConnectAttempts: Option[Int])
+class ReliableProxy(
+ targetPath: ActorPath,
+ retryAfter: FiniteDuration,
+ reconnectAfter: Option[FiniteDuration],
+ maxConnectAttempts: Option[Int])
extends Actor
with LoggingFSM[State, Vector[Message]]
with ReliableProxyDebugLogging {
diff --git a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala
index 45c2648ff8..eda603ef46 100644
--- a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala
@@ -269,8 +269,9 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen {
resetTimeoutExpires()
And("Receiving a successful response")
- receiverRespondsToRequestWith("First message in half-open state, should be forwarded",
- "This should close the circuit")
+ receiverRespondsToRequestWith(
+ "First message in half-open state, should be forwarded",
+ "This should close the circuit")
circuitBreakerReceivesSelfNotificationMessage()
@@ -328,8 +329,9 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen {
eventListener.expectMsg(CircuitHalfOpen(circuitBreaker))
When("Entering CLOSED state")
- receiverRespondsToRequestWith("First message in half-open state, should be forwarded",
- "This should close the circuit")
+ receiverRespondsToRequestWith(
+ "First message in half-open state, should be forwarded",
+ "This should close the circuit")
Then("An event is sent")
eventListener.expectMsg(CircuitClosed(circuitBreaker))
diff --git a/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala b/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala
index 4760dd0b1e..63b8b99f47 100644
--- a/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala
@@ -115,8 +115,9 @@ class MyActor extends Actor {
}
object MyApp extends App {
- val system = ActorSystem("MySystem",
- ConfigFactory.parseString("""
+ val system = ActorSystem(
+ "MySystem",
+ ConfigFactory.parseString("""
peek-dispatcher {
mailbox-type = "akka.contrib.mailbox.PeekMailboxType"
max-retries = 2
diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala
index 219273c6eb..50e2382913 100644
--- a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala
@@ -399,10 +399,11 @@ object MixinSample extends App {
val system = ActorSystem("pipeline")
//#mixin-model
- val texts = Map("that.rug_EN" -> "That rug really tied the room together.",
- "your.opinion_EN" -> "Yeah, well, you know, that's just, like, your opinion, man.",
- "that.rug_ES" -> "Esa alfombra realmente completaba la sala.",
- "your.opinion_ES" -> "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.")
+ val texts = Map(
+ "that.rug_EN" -> "That rug really tied the room together.",
+ "your.opinion_EN" -> "Yeah, well, you know, that's just, like, your opinion, man.",
+ "that.rug_ES" -> "Esa alfombra realmente completaba la sala.",
+ "your.opinion_ES" -> "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.")
case class I18nText(locale: String, key: String)
case class Message(author: Option[String], text: Any)
diff --git a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala
index abf7aea694..82c4eee94e 100644
--- a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala
+++ b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala
@@ -150,9 +150,10 @@ final class Lookup(val serviceName: String, val portName: Option[String], val pr
def getProtocol: Optional[String] =
protocol.asJava
- private def copy(serviceName: String = serviceName,
- portName: Option[String] = portName,
- protocol: Option[String] = protocol): Lookup =
+ private def copy(
+ serviceName: String = serviceName,
+ portName: Option[String] = portName,
+ protocol: Option[String] = protocol): Lookup =
new Lookup(serviceName, portName, protocol)
override def toString: String = s"Lookup($serviceName,$portName,$protocol)"
diff --git a/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala b/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala
index 80e7ef6e15..8185a96735 100644
--- a/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala
+++ b/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala
@@ -29,8 +29,9 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers {
"select implementation from config by config name (inside akka.discovery namespace)" in {
val className = classOf[FakeTestDiscovery].getCanonicalName
- val sys = ActorSystem("DiscoveryConfigurationSpec",
- ConfigFactory.parseString(s"""
+ val sys = ActorSystem(
+ "DiscoveryConfigurationSpec",
+ ConfigFactory.parseString(s"""
akka.discovery {
method = akka-mock-inside
@@ -48,8 +49,9 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers {
val className1 = classOf[FakeTestDiscovery].getCanonicalName
val className2 = classOf[FakeTestDiscovery2].getCanonicalName
- val sys = ActorSystem("DiscoveryConfigurationSpec",
- ConfigFactory.parseString(s"""
+ val sys = ActorSystem(
+ "DiscoveryConfigurationSpec",
+ ConfigFactory.parseString(s"""
akka.discovery {
method = mock1
@@ -72,8 +74,9 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers {
val className1 = classOf[FakeTestDiscovery].getCanonicalName
val className2 = classOf[FakeTestDiscovery2].getCanonicalName
- val sys = ActorSystem("DiscoveryConfigurationSpec",
- ConfigFactory.parseString(s"""
+ val sys = ActorSystem(
+ "DiscoveryConfigurationSpec",
+ ConfigFactory.parseString(s"""
akka.discovery {
method = mock1
@@ -97,8 +100,9 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers {
"throw a specific discovery method exception" in {
val className = classOf[ExceptionThrowingDiscovery].getCanonicalName
- val sys = ActorSystem("DiscoveryConfigurationSpec",
- ConfigFactory.parseString(s"""
+ val sys = ActorSystem(
+ "DiscoveryConfigurationSpec",
+ ConfigFactory.parseString(s"""
akka.discovery {
method = "mock1"
mock1 {
@@ -115,8 +119,9 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers {
"throw an illegal argument exception for not existing method" in {
val className = "className"
- val sys = ActorSystem("DiscoveryConfigurationSpec",
- ConfigFactory.parseString(s"""
+ val sys = ActorSystem(
+ "DiscoveryConfigurationSpec",
+ ConfigFactory.parseString(s"""
akka.discovery {
method = "$className"
}
diff --git a/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala b/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala
index 0939990843..4ea760c848 100644
--- a/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala
+++ b/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala
@@ -10,19 +10,21 @@ class LookupSpec extends WordSpec with Matchers with OptionValues {
// SRV strings with invalid domain names
// should fail to build lookups
- val srvWithInvalidDomainNames = List("_portName._protocol.service_name.local",
- "_portName._protocol.servicename,local",
- "_portName._protocol.servicename.local-",
- "_portName._protocol.-servicename.local")
+ val srvWithInvalidDomainNames = List(
+ "_portName._protocol.service_name.local",
+ "_portName._protocol.servicename,local",
+ "_portName._protocol.servicename.local-",
+ "_portName._protocol.-servicename.local")
// No SRV that should result in simple A/AAAA lookups
- val noSrvLookups = List("portName.protocol.serviceName.local",
- "serviceName.local",
- "_portName.serviceName",
- "_serviceName.local",
- "_serviceName,local",
- "-serviceName.local",
- "serviceName.local-")
+ val noSrvLookups = List(
+ "portName.protocol.serviceName.local",
+ "serviceName.local",
+ "_portName.serviceName",
+ "_serviceName.local",
+ "_serviceName,local",
+ "-serviceName.local",
+ "serviceName.local-")
"Lookup.parseSrv" should {
diff --git a/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala
index 21d0f6dd5a..7aea4b5bf4 100644
--- a/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala
+++ b/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala
@@ -21,8 +21,9 @@ class StubbedServiceDiscovery(system: ExtendedActorSystem) extends ServiceDiscov
override def lookup(query: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
if (query.serviceName == "stubbed") {
Future.successful(
- Resolved(query.serviceName,
- immutable.Seq(ResolvedTarget(host = "stubbed1", port = Some(1234), address = None))))
+ Resolved(
+ query.serviceName,
+ immutable.Seq(ResolvedTarget(host = "stubbed1", port = Some(1234), address = None))))
} else if (query.serviceName == "fail") {
Future.failed(new RuntimeException("No resolving for you!"))
} else {
@@ -89,22 +90,26 @@ class AggregateServiceDiscoverySpec
"only call first one if returns results" in {
val results = discovery.lookup("stubbed", 100.millis).futureValue
- results shouldEqual Resolved("stubbed",
- immutable.Seq(ResolvedTarget(host = "stubbed1", port = Some(1234), address = None)))
+ results shouldEqual Resolved(
+ "stubbed",
+ immutable.Seq(ResolvedTarget(host = "stubbed1", port = Some(1234), address = None)))
}
"move onto the next if no resolved targets" in {
val results = discovery.lookup("config1", 100.millis).futureValue
- results shouldEqual Resolved("config1",
- immutable.Seq(ResolvedTarget(host = "cat", port = Some(1233), address = None),
- ResolvedTarget(host = "dog", port = Some(1234), address = None)))
+ results shouldEqual Resolved(
+ "config1",
+ immutable.Seq(
+ ResolvedTarget(host = "cat", port = Some(1233), address = None),
+ ResolvedTarget(host = "dog", port = Some(1234), address = None)))
}
"move onto next if fails" in {
val results = discovery.lookup("fail", 100.millis).futureValue
// Stub fails then result comes from config
- results shouldEqual Resolved("fail",
- immutable.Seq(ResolvedTarget(host = "from-config", port = None, address = None)))
+ results shouldEqual Resolved(
+ "fail",
+ immutable.Seq(ResolvedTarget(host = "from-config", port = None, address = None)))
}
}
diff --git a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala
index 647cb0a72b..ca3a9981e3 100644
--- a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala
+++ b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala
@@ -63,8 +63,9 @@ class ConfigServiceDiscoverySpec
"load from config" in {
val result = discovery.lookup("service1", 100.millis).futureValue
result.serviceName shouldEqual "service1"
- result.addresses shouldEqual immutable.Seq(ResolvedTarget(host = "cat", port = Some(1233), address = None),
- ResolvedTarget(host = "dog", port = None, address = None))
+ result.addresses shouldEqual immutable.Seq(
+ ResolvedTarget(host = "cat", port = Some(1233), address = None),
+ ResolvedTarget(host = "dog", port = None, address = None))
}
"return no resolved targets if not in config" in {
diff --git a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala
index 677666079a..5d05e3dc04 100644
--- a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala
+++ b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala
@@ -40,11 +40,11 @@ class ConfigServicesParserSpec extends WordSpec with Matchers {
val result = ConfigServicesParser.parse(config)
- result("service1") shouldEqual Resolved("service1",
- immutable.Seq(ResolvedTarget(host = "cat",
- port = Some(1233),
- address = None),
- ResolvedTarget(host = "dog", port = None, address = None)))
+ result("service1") shouldEqual Resolved(
+ "service1",
+ immutable.Seq(
+ ResolvedTarget(host = "cat", port = Some(1233), address = None),
+ ResolvedTarget(host = "dog", port = None, address = None)))
result("service2") shouldEqual Resolved("service2", immutable.Seq())
}
}
diff --git a/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala
index e93b8f7477..574d824032 100644
--- a/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala
+++ b/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala
@@ -56,9 +56,10 @@ class DnsDiscoverySpec extends AkkaSpec(DnsDiscoverySpec.config) with DockerBind
.lookup(Lookup("foo.test.").withPortName("service").withProtocol("tcp"), resolveTimeout = 10.seconds)
.futureValue
- val expected = Set(ResolvedTarget("a-single.foo.test", Some(5060), Some(InetAddress.getByName("192.168.1.20"))),
- ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.21"))),
- ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.22"))))
+ val expected = Set(
+ ResolvedTarget("a-single.foo.test", Some(5060), Some(InetAddress.getByName("192.168.1.20"))),
+ ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.21"))),
+ ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.22"))))
val result1 = lookup()
result1.addresses.toSet shouldEqual expected
diff --git a/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala
index 7e756530a6..6a77a76576 100644
--- a/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala
+++ b/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala
@@ -18,24 +18,13 @@ import scala.concurrent.duration._
class DnsServiceDiscoverySpec extends WordSpec with Matchers {
"srvRecordsToResolved" must {
"fill in ips from A records" in {
- val resolved = DnsProtocol.Resolved("cats.com",
- im.Seq(
- new SRVRecord("cats.com",
- Ttl.fromPositive(1.second),
- 2,
- 3,
- 4,
- "kittens.com")),
- im.Seq(
- new ARecord("kittens.com",
- Ttl.fromPositive(1.second),
- InetAddress.getByName("127.0.0.2")),
- new ARecord("kittens.com",
- Ttl.fromPositive(1.second),
- InetAddress.getByName("127.0.0.3")),
- new ARecord("donkeys.com",
- Ttl.fromPositive(1.second),
- InetAddress.getByName("127.0.0.4"))))
+ val resolved = DnsProtocol.Resolved(
+ "cats.com",
+ im.Seq(new SRVRecord("cats.com", Ttl.fromPositive(1.second), 2, 3, 4, "kittens.com")),
+ im.Seq(
+ new ARecord("kittens.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.2")),
+ new ARecord("kittens.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.3")),
+ new ARecord("donkeys.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.4"))))
val result: ServiceDiscovery.Resolved =
DnsServiceDiscovery.srvRecordsToResolved("cats.com", resolved)
@@ -48,18 +37,10 @@ class DnsServiceDiscoverySpec extends WordSpec with Matchers {
// Naughty DNS server
"use SRV target and port if no additional records" in {
- val resolved = DnsProtocol.Resolved("cats.com",
- im.Seq(
- new SRVRecord("cats.com",
- Ttl.fromPositive(1.second),
- 2,
- 3,
- 8080,
- "kittens.com")),
- im.Seq(
- new ARecord("donkeys.com",
- Ttl.fromPositive(1.second),
- InetAddress.getByName("127.0.0.4"))))
+ val resolved = DnsProtocol.Resolved(
+ "cats.com",
+ im.Seq(new SRVRecord("cats.com", Ttl.fromPositive(1.second), 2, 3, 8080, "kittens.com")),
+ im.Seq(new ARecord("donkeys.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.4"))))
val result =
DnsServiceDiscovery.srvRecordsToResolved("cats.com", resolved)
@@ -68,31 +49,30 @@ class DnsServiceDiscoverySpec extends WordSpec with Matchers {
}
"fill in ips from AAAA records" in {
- val resolved = DnsProtocol.Resolved("cats.com",
- im.Seq(
- new SRVRecord("cats1.com",
- Ttl.fromPositive(1.second),
- 2,
- 3,
- 4,
- "kittens.com")),
- im.Seq(
- new AAAARecord("kittens.com",
- Ttl.fromPositive(2.seconds),
- InetAddress.getByName("::1").asInstanceOf[Inet6Address]),
- new AAAARecord("kittens.com",
- Ttl.fromPositive(2.seconds),
- InetAddress.getByName("::2").asInstanceOf[Inet6Address]),
- new AAAARecord("donkeys.com",
- Ttl.fromPositive(2.seconds),
- InetAddress.getByName("::3").asInstanceOf[Inet6Address])))
+ val resolved = DnsProtocol.Resolved(
+ "cats.com",
+ im.Seq(new SRVRecord("cats1.com", Ttl.fromPositive(1.second), 2, 3, 4, "kittens.com")),
+ im.Seq(
+ new AAAARecord(
+ "kittens.com",
+ Ttl.fromPositive(2.seconds),
+ InetAddress.getByName("::1").asInstanceOf[Inet6Address]),
+ new AAAARecord(
+ "kittens.com",
+ Ttl.fromPositive(2.seconds),
+ InetAddress.getByName("::2").asInstanceOf[Inet6Address]),
+ new AAAARecord(
+ "donkeys.com",
+ Ttl.fromPositive(2.seconds),
+ InetAddress.getByName("::3").asInstanceOf[Inet6Address])))
val result: ServiceDiscovery.Resolved =
DnsServiceDiscovery.srvRecordsToResolved("cats.com", resolved)
result.serviceName shouldEqual "cats.com"
- result.addresses.toSet shouldEqual Set(ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::1"))),
- ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::2"))))
+ result.addresses.toSet shouldEqual Set(
+ ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::1"))),
+ ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::2"))))
}
}
}
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala
index f5f76ee97a..d0d2034e8d 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala
@@ -103,10 +103,11 @@ object LmdbDurableStore {
private case object WriteBehind extends DeadLetterSuppression
- private final case class Lmdb(env: Env[ByteBuffer],
- db: Dbi[ByteBuffer],
- keyBuffer: ByteBuffer,
- valueBuffer: ByteBuffer)
+ private final case class Lmdb(
+ env: Env[ByteBuffer],
+ db: Dbi[ByteBuffer],
+ keyBuffer: ByteBuffer,
+ valueBuffer: ByteBuffer)
}
final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
@@ -150,9 +151,10 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
val valueBuffer = ByteBuffer.allocateDirect(100 * 1024) // will grow when needed
if (log.isDebugEnabled)
- log.debug("Init of LMDB in directory [{}] took [{} ms]",
- dir.getCanonicalPath,
- TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0))
+ log.debug(
+ "Init of LMDB in directory [{}] took [{} ms]",
+ dir.getCanonicalPath,
+ TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0))
val l = Lmdb(env, db, keyBuffer, valueBuffer)
_lmdb = OptionVal.Some(l)
l
@@ -293,9 +295,10 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
}
tx.commit()
if (log.isDebugEnabled)
- log.debug("store and commit of [{}] entries took [{} ms]",
- pending.size,
- TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0))
+ log.debug(
+ "store and commit of [{}] entries took [{} ms]",
+ pending.size,
+ TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0))
} catch {
case NonFatal(e) =>
import scala.collection.JavaConverters._
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
index 87bd39850d..3a25ceaf66 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
@@ -41,8 +41,9 @@ object GCounter {
* This class is immutable, i.e. "modifying" methods return a new instance.
*/
@SerialVersionUID(1L)
-final class GCounter private[akka] (private[akka] val state: Map[UniqueAddress, BigInt] = Map.empty,
- override val delta: Option[GCounter] = None)
+final class GCounter private[akka] (
+ private[akka] val state: Map[UniqueAddress, BigInt] = Map.empty,
+ override val delta: Option[GCounter] = None)
extends DeltaReplicatedData
with ReplicatedDelta
with ReplicatedDataSerialization
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
index 99f1a81254..1b3af52425 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
@@ -68,9 +68,10 @@ object ORMap {
// PutDeltaOp contains ORSet delta and full value
/** INTERNAL API */
- @InternalApi private[akka] final case class PutDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp,
- value: (A, B),
- zeroTag: ZeroTag)
+ @InternalApi private[akka] final case class PutDeltaOp[A, B <: ReplicatedData](
+ underlying: ORSet.DeltaOp,
+ value: (A, B),
+ zeroTag: ZeroTag)
extends AtomicDeltaOp[A, B] {
override def merge(that: DeltaOp): DeltaOp = that match {
case put: PutDeltaOp[A, B] if this.value._1 == put.value._1 =>
@@ -93,9 +94,10 @@ object ORMap {
// UpdateDeltaOp contains ORSet delta and either delta of value (in case where underlying type supports deltas) or full value
/** INTERNAL API */
- @InternalApi private[akka] final case class UpdateDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp,
- values: Map[A, B],
- zeroTag: ZeroTag)
+ @InternalApi private[akka] final case class UpdateDeltaOp[A, B <: ReplicatedData](
+ underlying: ORSet.DeltaOp,
+ values: Map[A, B],
+ zeroTag: ZeroTag)
extends AtomicDeltaOp[A, B] {
override def merge(that: DeltaOp): DeltaOp = that match {
case update: UpdateDeltaOp[A, B] =>
@@ -117,15 +119,17 @@ object ORMap {
// RemoveDeltaOp does not contain any value at all - the propagated 'value' map would be empty
/** INTERNAL API */
- @InternalApi private[akka] final case class RemoveDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp,
- zeroTag: ZeroTag)
+ @InternalApi private[akka] final case class RemoveDeltaOp[A, B <: ReplicatedData](
+ underlying: ORSet.DeltaOp,
+ zeroTag: ZeroTag)
extends AtomicDeltaOp[A, B]
// RemoveKeyDeltaOp contains a single value - to provide the recipient with the removed key for value map
/** INTERNAL API */
- @InternalApi private[akka] final case class RemoveKeyDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp,
- removedKey: A,
- zeroTag: ZeroTag)
+ @InternalApi private[akka] final case class RemoveKeyDeltaOp[A, B <: ReplicatedData](
+ underlying: ORSet.DeltaOp,
+ removedKey: A,
+ zeroTag: ZeroTag)
extends AtomicDeltaOp[A, B]
// DeltaGroup is effectively a causally ordered list of individual deltas
@@ -169,10 +173,11 @@ object ORMap {
* This class is immutable, i.e. "modifying" methods return a new instance.
*/
@SerialVersionUID(1L)
-final class ORMap[A, B <: ReplicatedData] private[akka] (private[akka] val keys: ORSet[A],
- private[akka] val values: Map[A, B],
- private[akka] val zeroTag: ZeroTag,
- override val delta: Option[ORMap.DeltaOp] = None)
+final class ORMap[A, B <: ReplicatedData] private[akka] (
+ private[akka] val keys: ORSet[A],
+ private[akka] val values: Map[A, B],
+ private[akka] val zeroTag: ZeroTag,
+ override val delta: Option[ORMap.DeltaOp] = None)
extends DeltaReplicatedData
with ReplicatedDataSerialization
with RemovedNodePruning {
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
index 2258d8bc72..127b3540bc 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
@@ -65,8 +65,9 @@ object ORMultiMap {
* Note that on concurrent adds and removals for the same key (on the same set), removals can be lost.
*/
@SerialVersionUID(1L)
-final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, ORSet[B]],
- private[akka] val withValueDeltas: Boolean)
+final class ORMultiMap[A, B] private[akka] (
+ private[akka] val underlying: ORMap[A, ORSet[B]],
+ private[akka] val withValueDeltas: Boolean)
extends DeltaReplicatedData
with ReplicatedDataSerialization
with RemovedNodePruning {
@@ -82,8 +83,9 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
val newValues = newUnderlying.values.filterNot {
case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty
}
- new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta),
- withValueDeltas)
+ new ORMultiMap[A, B](
+ new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta),
+ withValueDeltas)
} else
new ORMultiMap(underlying.merge(that.underlying), withValueDeltas)
} else throw new IllegalArgumentException("Trying to merge two ORMultiMaps of different map sub-type")
@@ -305,10 +307,11 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
/**
* INTERNAL API
*/
- @InternalApi private[akka] def replaceBinding(node: UniqueAddress,
- key: A,
- oldElement: B,
- newElement: B): ORMultiMap[A, B] =
+ @InternalApi private[akka] def replaceBinding(
+ node: UniqueAddress,
+ key: A,
+ oldElement: B,
+ newElement: B): ORMultiMap[A, B] =
if (newElement != oldElement)
addBinding(node, key, newElement).removeBinding(node, key, oldElement)
else
@@ -326,8 +329,9 @@ final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[
val newValues = newUnderlying.values.filterNot {
case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty
}
- new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta),
- withValueDeltas)
+ new ORMultiMap[A, B](
+ new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta),
+ withValueDeltas)
} else
new ORMultiMap(underlying.mergeDelta(thatDelta), withValueDeltas)
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
index 31da85ad7d..07c8c1b985 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
@@ -126,8 +126,9 @@ object ORSet {
*/
@InternalApi private[akka] def subtractDots(dot: Dot, vvector: VersionVector): Dot = {
- @tailrec def dropDots(remaining: List[(UniqueAddress, Long)],
- acc: List[(UniqueAddress, Long)]): List[(UniqueAddress, Long)] =
+ @tailrec def dropDots(
+ remaining: List[(UniqueAddress, Long)],
+ acc: List[(UniqueAddress, Long)]): List[(UniqueAddress, Long)] =
remaining match {
case Nil => acc
case (d @ (node, v1)) :: rest =>
@@ -160,9 +161,10 @@ object ORSet {
* INTERNAL API
* @see [[ORSet#merge]]
*/
- @InternalApi private[akka] def mergeCommonKeys[A](commonKeys: Set[A],
- lhs: ORSet[A],
- rhs: ORSet[A]): Map[A, ORSet.Dot] =
+ @InternalApi private[akka] def mergeCommonKeys[A](
+ commonKeys: Set[A],
+ lhs: ORSet[A],
+ rhs: ORSet[A]): Map[A, ORSet.Dot] =
mergeCommonKeys(commonKeys.iterator, lhs, rhs)
private def mergeCommonKeys[A](commonKeys: Iterator[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = {
@@ -231,16 +233,18 @@ object ORSet {
* INTERNAL API
* @see [[ORSet#merge]]
*/
- @InternalApi private[akka] def mergeDisjointKeys[A](keys: Set[A],
- elementsMap: Map[A, ORSet.Dot],
- vvector: VersionVector,
- accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] =
+ @InternalApi private[akka] def mergeDisjointKeys[A](
+ keys: Set[A],
+ elementsMap: Map[A, ORSet.Dot],
+ vvector: VersionVector,
+ accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] =
mergeDisjointKeys(keys.iterator, elementsMap, vvector, accumulator)
- private def mergeDisjointKeys[A](keys: Iterator[A],
- elementsMap: Map[A, ORSet.Dot],
- vvector: VersionVector,
- accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = {
+ private def mergeDisjointKeys[A](
+ keys: Iterator[A],
+ elementsMap: Map[A, ORSet.Dot],
+ vvector: VersionVector,
+ accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = {
keys.foldLeft(accumulator) {
case (acc, k) =>
val dots = elementsMap(k)
@@ -285,9 +289,10 @@ object ORSet {
* This class is immutable, i.e. "modifying" methods return a new instance.
*/
@SerialVersionUID(1L)
-final class ORSet[A] private[akka] (private[akka] val elementsMap: Map[A, ORSet.Dot],
- private[akka] val vvector: VersionVector,
- override val delta: Option[ORSet.DeltaOp] = None)
+final class ORSet[A] private[akka] (
+ private[akka] val elementsMap: Map[A, ORSet.Dot],
+ private[akka] val vvector: VersionVector,
+ override val delta: Option[ORSet.DeltaOp] = None)
extends DeltaReplicatedData
with ReplicatedDataSerialization
with RemovedNodePruning
@@ -530,9 +535,10 @@ final class ORSet[A] private[akka] (private[akka] val elementsMap: Map[A, ORSet.
new ORSet(updated, vvector.pruningCleanup(removedNode))
}
- private def copy(elementsMap: Map[A, ORSet.Dot] = this.elementsMap,
- vvector: VersionVector = this.vvector,
- delta: Option[ORSet.DeltaOp] = this.delta): ORSet[A] =
+ private def copy(
+ elementsMap: Map[A, ORSet.Dot] = this.elementsMap,
+ vvector: VersionVector = this.vvector,
+ delta: Option[ORSet.DeltaOp] = this.delta): ORSet[A] =
new ORSet(elementsMap, vvector, delta)
// this class cannot be a `case class` because we need different `unapply`
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
index 3d933d8fb1..a19bbb26d2 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
@@ -205,8 +205,9 @@ final class PNCounter private[akka] (private[akka] val increments: GCounter, pri
increments.needPruningFrom(removedNode) || decrements.needPruningFrom(removedNode)
override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): PNCounter =
- copy(increments = increments.prune(removedNode, collapseInto),
- decrements = decrements.prune(removedNode, collapseInto))
+ copy(
+ increments = increments.prune(removedNode, collapseInto),
+ decrements = decrements.prune(removedNode, collapseInto))
override def pruningCleanup(removedNode: UniqueAddress): PNCounter =
copy(increments = increments.pruningCleanup(removedNode), decrements = decrements.pruningCleanup(removedNode))
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
index bbf68ffe83..802ed97244 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
@@ -138,120 +138,129 @@ object ReplicatorSettings {
* `*` at the end of a key. All entries can be made durable by including "*"
* in the `Set`.
*/
-final class ReplicatorSettings(val roles: Set[String],
- val gossipInterval: FiniteDuration,
- val notifySubscribersInterval: FiniteDuration,
- val maxDeltaElements: Int,
- val dispatcher: String,
- val pruningInterval: FiniteDuration,
- val maxPruningDissemination: FiniteDuration,
- val durableStoreProps: Either[(String, Config), Props],
- val durableKeys: Set[KeyId],
- val pruningMarkerTimeToLive: FiniteDuration,
- val durablePruningMarkerTimeToLive: FiniteDuration,
- val deltaCrdtEnabled: Boolean,
- val maxDeltaSize: Int) {
+final class ReplicatorSettings(
+ val roles: Set[String],
+ val gossipInterval: FiniteDuration,
+ val notifySubscribersInterval: FiniteDuration,
+ val maxDeltaElements: Int,
+ val dispatcher: String,
+ val pruningInterval: FiniteDuration,
+ val maxPruningDissemination: FiniteDuration,
+ val durableStoreProps: Either[(String, Config), Props],
+ val durableKeys: Set[KeyId],
+ val pruningMarkerTimeToLive: FiniteDuration,
+ val durablePruningMarkerTimeToLive: FiniteDuration,
+ val deltaCrdtEnabled: Boolean,
+ val maxDeltaSize: Int) {
// for backwards compatibility
- def this(role: Option[String],
- gossipInterval: FiniteDuration,
- notifySubscribersInterval: FiniteDuration,
- maxDeltaElements: Int,
- dispatcher: String,
- pruningInterval: FiniteDuration,
- maxPruningDissemination: FiniteDuration,
- durableStoreProps: Either[(String, Config), Props],
- durableKeys: Set[KeyId],
- pruningMarkerTimeToLive: FiniteDuration,
- durablePruningMarkerTimeToLive: FiniteDuration,
- deltaCrdtEnabled: Boolean,
- maxDeltaSize: Int) =
- this(role.toSet,
- gossipInterval,
- notifySubscribersInterval,
- maxDeltaElements,
- dispatcher,
- pruningInterval,
- maxPruningDissemination,
- durableStoreProps,
- durableKeys,
- pruningMarkerTimeToLive,
- durablePruningMarkerTimeToLive,
- deltaCrdtEnabled,
- maxDeltaSize)
+ def this(
+ role: Option[String],
+ gossipInterval: FiniteDuration,
+ notifySubscribersInterval: FiniteDuration,
+ maxDeltaElements: Int,
+ dispatcher: String,
+ pruningInterval: FiniteDuration,
+ maxPruningDissemination: FiniteDuration,
+ durableStoreProps: Either[(String, Config), Props],
+ durableKeys: Set[KeyId],
+ pruningMarkerTimeToLive: FiniteDuration,
+ durablePruningMarkerTimeToLive: FiniteDuration,
+ deltaCrdtEnabled: Boolean,
+ maxDeltaSize: Int) =
+ this(
+ role.toSet,
+ gossipInterval,
+ notifySubscribersInterval,
+ maxDeltaElements,
+ dispatcher,
+ pruningInterval,
+ maxPruningDissemination,
+ durableStoreProps,
+ durableKeys,
+ pruningMarkerTimeToLive,
+ durablePruningMarkerTimeToLive,
+ deltaCrdtEnabled,
+ maxDeltaSize)
// For backwards compatibility
- def this(role: Option[String],
- gossipInterval: FiniteDuration,
- notifySubscribersInterval: FiniteDuration,
- maxDeltaElements: Int,
- dispatcher: String,
- pruningInterval: FiniteDuration,
- maxPruningDissemination: FiniteDuration) =
- this(roles = role.toSet,
- gossipInterval,
- notifySubscribersInterval,
- maxDeltaElements,
- dispatcher,
- pruningInterval,
- maxPruningDissemination,
- Right(Props.empty),
- Set.empty,
- 6.hours,
- 10.days,
- true,
- 200)
+ def this(
+ role: Option[String],
+ gossipInterval: FiniteDuration,
+ notifySubscribersInterval: FiniteDuration,
+ maxDeltaElements: Int,
+ dispatcher: String,
+ pruningInterval: FiniteDuration,
+ maxPruningDissemination: FiniteDuration) =
+ this(
+ roles = role.toSet,
+ gossipInterval,
+ notifySubscribersInterval,
+ maxDeltaElements,
+ dispatcher,
+ pruningInterval,
+ maxPruningDissemination,
+ Right(Props.empty),
+ Set.empty,
+ 6.hours,
+ 10.days,
+ true,
+ 200)
// For backwards compatibility
- def this(role: Option[String],
- gossipInterval: FiniteDuration,
- notifySubscribersInterval: FiniteDuration,
- maxDeltaElements: Int,
- dispatcher: String,
- pruningInterval: FiniteDuration,
- maxPruningDissemination: FiniteDuration,
- durableStoreProps: Either[(String, Config), Props],
- durableKeys: Set[String]) =
- this(role,
- gossipInterval,
- notifySubscribersInterval,
- maxDeltaElements,
- dispatcher,
- pruningInterval,
- maxPruningDissemination,
- durableStoreProps,
- durableKeys,
- 6.hours,
- 10.days,
- true,
- 200)
+ def this(
+ role: Option[String],
+ gossipInterval: FiniteDuration,
+ notifySubscribersInterval: FiniteDuration,
+ maxDeltaElements: Int,
+ dispatcher: String,
+ pruningInterval: FiniteDuration,
+ maxPruningDissemination: FiniteDuration,
+ durableStoreProps: Either[(String, Config), Props],
+ durableKeys: Set[String]) =
+ this(
+ role,
+ gossipInterval,
+ notifySubscribersInterval,
+ maxDeltaElements,
+ dispatcher,
+ pruningInterval,
+ maxPruningDissemination,
+ durableStoreProps,
+ durableKeys,
+ 6.hours,
+ 10.days,
+ true,
+ 200)
// For backwards compatibility
- def this(role: Option[String],
- gossipInterval: FiniteDuration,
- notifySubscribersInterval: FiniteDuration,
- maxDeltaElements: Int,
- dispatcher: String,
- pruningInterval: FiniteDuration,
- maxPruningDissemination: FiniteDuration,
- durableStoreProps: Either[(String, Config), Props],
- durableKeys: Set[String],
- pruningMarkerTimeToLive: FiniteDuration,
- durablePruningMarkerTimeToLive: FiniteDuration,
- deltaCrdtEnabled: Boolean) =
- this(role,
- gossipInterval,
- notifySubscribersInterval,
- maxDeltaElements,
- dispatcher,
- pruningInterval,
- maxPruningDissemination,
- durableStoreProps,
- durableKeys,
- pruningMarkerTimeToLive,
- durablePruningMarkerTimeToLive,
- deltaCrdtEnabled,
- 200)
+ def this(
+ role: Option[String],
+ gossipInterval: FiniteDuration,
+ notifySubscribersInterval: FiniteDuration,
+ maxDeltaElements: Int,
+ dispatcher: String,
+ pruningInterval: FiniteDuration,
+ maxPruningDissemination: FiniteDuration,
+ durableStoreProps: Either[(String, Config), Props],
+ durableKeys: Set[String],
+ pruningMarkerTimeToLive: FiniteDuration,
+ durablePruningMarkerTimeToLive: FiniteDuration,
+ deltaCrdtEnabled: Boolean) =
+ this(
+ role,
+ gossipInterval,
+ notifySubscribersInterval,
+ maxDeltaElements,
+ dispatcher,
+ pruningInterval,
+ maxPruningDissemination,
+ durableStoreProps,
+ durableKeys,
+ pruningMarkerTimeToLive,
+ durablePruningMarkerTimeToLive,
+ deltaCrdtEnabled,
+ 200)
def withRole(role: String): ReplicatorSettings = copy(roles = ReplicatorSettings.roleOption(role).toSet)
@@ -288,10 +297,12 @@ final class ReplicatorSettings(val roles: Set[String],
def withPruning(pruningInterval: FiniteDuration, maxPruningDissemination: FiniteDuration): ReplicatorSettings =
copy(pruningInterval = pruningInterval, maxPruningDissemination = maxPruningDissemination)
- def withPruningMarkerTimeToLive(pruningMarkerTimeToLive: FiniteDuration,
- durablePruningMarkerTimeToLive: FiniteDuration): ReplicatorSettings =
- copy(pruningMarkerTimeToLive = pruningMarkerTimeToLive,
- durablePruningMarkerTimeToLive = durablePruningMarkerTimeToLive)
+ def withPruningMarkerTimeToLive(
+ pruningMarkerTimeToLive: FiniteDuration,
+ durablePruningMarkerTimeToLive: FiniteDuration): ReplicatorSettings =
+ copy(
+ pruningMarkerTimeToLive = pruningMarkerTimeToLive,
+ durablePruningMarkerTimeToLive = durablePruningMarkerTimeToLive)
def withDurableStoreProps(durableStoreProps: Props): ReplicatorSettings =
copy(durableStoreProps = Right(durableStoreProps))
@@ -316,32 +327,34 @@ final class ReplicatorSettings(val roles: Set[String],
def withMaxDeltaSize(maxDeltaSize: Int): ReplicatorSettings =
copy(maxDeltaSize = maxDeltaSize)
- private def copy(roles: Set[String] = roles,
- gossipInterval: FiniteDuration = gossipInterval,
- notifySubscribersInterval: FiniteDuration = notifySubscribersInterval,
- maxDeltaElements: Int = maxDeltaElements,
- dispatcher: String = dispatcher,
- pruningInterval: FiniteDuration = pruningInterval,
- maxPruningDissemination: FiniteDuration = maxPruningDissemination,
- durableStoreProps: Either[(String, Config), Props] = durableStoreProps,
- durableKeys: Set[KeyId] = durableKeys,
- pruningMarkerTimeToLive: FiniteDuration = pruningMarkerTimeToLive,
- durablePruningMarkerTimeToLive: FiniteDuration = durablePruningMarkerTimeToLive,
- deltaCrdtEnabled: Boolean = deltaCrdtEnabled,
- maxDeltaSize: Int = maxDeltaSize): ReplicatorSettings =
- new ReplicatorSettings(roles,
- gossipInterval,
- notifySubscribersInterval,
- maxDeltaElements,
- dispatcher,
- pruningInterval,
- maxPruningDissemination,
- durableStoreProps,
- durableKeys,
- pruningMarkerTimeToLive,
- durablePruningMarkerTimeToLive,
- deltaCrdtEnabled,
- maxDeltaSize)
+ private def copy(
+ roles: Set[String] = roles,
+ gossipInterval: FiniteDuration = gossipInterval,
+ notifySubscribersInterval: FiniteDuration = notifySubscribersInterval,
+ maxDeltaElements: Int = maxDeltaElements,
+ dispatcher: String = dispatcher,
+ pruningInterval: FiniteDuration = pruningInterval,
+ maxPruningDissemination: FiniteDuration = maxPruningDissemination,
+ durableStoreProps: Either[(String, Config), Props] = durableStoreProps,
+ durableKeys: Set[KeyId] = durableKeys,
+ pruningMarkerTimeToLive: FiniteDuration = pruningMarkerTimeToLive,
+ durablePruningMarkerTimeToLive: FiniteDuration = durablePruningMarkerTimeToLive,
+ deltaCrdtEnabled: Boolean = deltaCrdtEnabled,
+ maxDeltaSize: Int = maxDeltaSize): ReplicatorSettings =
+ new ReplicatorSettings(
+ roles,
+ gossipInterval,
+ notifySubscribersInterval,
+ maxDeltaElements,
+ dispatcher,
+ pruningInterval,
+ maxPruningDissemination,
+ durableStoreProps,
+ durableKeys,
+ pruningMarkerTimeToLive,
+ durablePruningMarkerTimeToLive,
+ deltaCrdtEnabled,
+ maxDeltaSize)
}
object Replicator {
@@ -350,8 +363,9 @@ object Replicator {
* Factory method for the [[akka.actor.Props]] of the [[Replicator]] actor.
*/
def props(settings: ReplicatorSettings): Props = {
- require(settings.durableKeys.isEmpty || (settings.durableStoreProps != Right(Props.empty)),
- "durableStoreProps must be defined when durableKeys are defined")
+ require(
+ settings.durableKeys.isEmpty || (settings.durableStoreProps != Right(Props.empty)),
+ "durableStoreProps must be defined when durableKeys are defined")
Props(new Replicator(settings)).withDeploy(Deploy.local).withDispatcher(settings.dispatcher)
}
@@ -578,10 +592,11 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or local correlation data structures.
*/
- def apply[A <: ReplicatedData](key: Key[A],
- initial: A,
- writeConsistency: WriteConsistency,
- request: Option[Any] = None)(modify: A => A): Update[A] =
+ def apply[A <: ReplicatedData](
+ key: Key[A],
+ initial: A,
+ writeConsistency: WriteConsistency,
+ request: Option[Any] = None)(modify: A => A): Update[A] =
Update(key, writeConsistency, request)(modifyWithInitial(initial, modify))
private def modifyWithInitial[A <: ReplicatedData](initial: A, modify: A => A): Option[A] => A = {
@@ -632,11 +647,12 @@ object Replicator {
* way to pass contextual information (e.g. original sender) without having to use `ask`
* or local correlation data structures.
*/
- def this(key: Key[A],
- initial: A,
- writeConsistency: WriteConsistency,
- request: Optional[Any],
- modify: JFunction[A, A]) =
+ def this(
+ key: Key[A],
+ initial: A,
+ writeConsistency: WriteConsistency,
+ request: Optional[Any],
+ modify: JFunction[A, A]) =
this(key, writeConsistency, Option(request.orElse(null)))(
Update.modifyWithInitial(initial, data => modify.apply(data)))
@@ -669,10 +685,11 @@ object Replicator {
* If the `modify` function of the [[Update]] throws an exception the reply message
* will be this `ModifyFailure` message. The original exception is included as `cause`.
*/
- final case class ModifyFailure[A <: ReplicatedData](key: Key[A],
- errorMessage: String,
- cause: Throwable,
- request: Option[Any])
+ final case class ModifyFailure[A <: ReplicatedData](
+ key: Key[A],
+ errorMessage: String,
+ cause: Throwable,
+ request: Option[Any])
extends UpdateFailure[A] {
override def toString: String = s"ModifyFailure [$key]: $errorMessage"
}
@@ -792,9 +809,10 @@ object Replicator {
/**
* The `DataEnvelope` wraps a data entry and carries state of the pruning process for the entry.
*/
- final case class DataEnvelope(data: ReplicatedData,
- pruning: Map[UniqueAddress, PruningState] = Map.empty,
- deltaVersions: VersionVector = VersionVector.empty)
+ final case class DataEnvelope(
+ data: ReplicatedData,
+ pruning: Map[UniqueAddress, PruningState] = Map.empty,
+ deltaVersions: VersionVector = VersionVector.empty)
extends ReplicatorMessage {
import PruningState._
@@ -817,8 +835,9 @@ object Replicator {
}
def initRemovedNodePruning(removed: UniqueAddress, owner: UniqueAddress): DataEnvelope = {
- copy(pruning = pruning.updated(removed, PruningInitialized(owner, Set.empty)),
- deltaVersions = cleanedDeltaVersions(removed))
+ copy(
+ pruning = pruning.updated(removed, PruningInitialized(owner, Set.empty)),
+ deltaVersions = cleanedDeltaVersions(removed))
}
def prune(from: UniqueAddress, pruningPerformed: PruningPerformed): DataEnvelope = {
@@ -828,9 +847,10 @@ object Replicator {
pruning(from) match {
case PruningInitialized(owner, _) =>
val prunedData = dataWithRemovedNodePruning.prune(from, owner)
- copy(data = prunedData,
- pruning = pruning.updated(from, pruningPerformed),
- deltaVersions = cleanedDeltaVersions(from))
+ copy(
+ data = prunedData,
+ pruning = pruning.updated(from, pruningPerformed),
+ deltaVersions = cleanedDeltaVersions(from))
case _ =>
this
}
@@ -874,9 +894,10 @@ object Replicator {
val mergedDeltaVersions = cleanedDV.merge(cleanedOtherDV)
// cleanup both sides before merging, `merge(otherData: ReplicatedData)` will cleanup other.data
- copy(data = cleaned(data, filteredMergedPruning),
- deltaVersions = mergedDeltaVersions,
- pruning = filteredMergedPruning).merge(other.data)
+ copy(
+ data = cleaned(data, filteredMergedPruning),
+ deltaVersions = mergedDeltaVersions,
+ pruning = filteredMergedPruning).merge(other.data)
}
def merge(otherData: ReplicatedData): DataEnvelope = {
@@ -1166,8 +1187,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val selfUniqueAddress = cluster.selfUniqueAddress
require(!cluster.isTerminated, "Cluster node must not be terminated")
- require(roles.subsetOf(cluster.selfRoles),
- s"This cluster member [${selfAddress}] doesn't have all the roles [${roles.mkString(", ")}]")
+ require(
+ roles.subsetOf(cluster.selfRoles),
+ s"This cluster member [${selfAddress}] doesn't have all the roles [${roles.mkString(", ")}]")
//Start periodic gossip to random nodes in cluster
import context.dispatcher
@@ -1210,15 +1232,16 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
override def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation = {
// Important to include the pruning state in the deltas. For example if the delta is based
// on an entry that has been pruned but that has not yet been performed on the target node.
- DeltaPropagation(selfUniqueAddress,
- reply = false,
- deltas.iterator.collect {
- case (key, (d, fromSeqNr, toSeqNr)) if d != NoDeltaPlaceholder =>
- getData(key) match {
- case Some(envelope) => key -> Delta(envelope.copy(data = d), fromSeqNr, toSeqNr)
- case None => key -> Delta(DataEnvelope(d), fromSeqNr, toSeqNr)
- }
- }.toMap)
+ DeltaPropagation(
+ selfUniqueAddress,
+ reply = false,
+ deltas.iterator.collect {
+ case (key, (d, fromSeqNr, toSeqNr)) if d != NoDeltaPlaceholder =>
+ getData(key) match {
+ case Some(envelope) => key -> Delta(envelope.copy(data = d), fromSeqNr, toSeqNr)
+ case None => key -> Delta(DataEnvelope(d), fromSeqNr, toSeqNr)
+ }
+ }.toMap)
}
}
val deltaPropagationTask: Option[Cancellable] =
@@ -1302,9 +1325,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def fromDurableStore: Boolean = sender() == durableStore && sender() != context.system.deadLetters
OneForOneStrategy()(({
case e @ (_: DurableStore.LoadFailed | _: ActorInitializationException) if fromDurableStore =>
- log.error(e,
- "Stopping distributed-data Replicator due to load or startup failure in durable store, caused by: {}",
- if (e.getCause eq null) "" else e.getCause.getMessage)
+ log.error(
+ e,
+ "Stopping distributed-data Replicator due to load or startup failure in durable store, caused by: {}",
+ if (e.getCause eq null) "" else e.getCause.getMessage)
context.stop(self)
SupervisorStrategy.Stop
}: SupervisorStrategy.Decider).orElse(SupervisorStrategy.defaultDecider))
@@ -1344,10 +1368,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
}
case LoadAllCompleted =>
- log.debug("Loading {} entries from durable store took {} ms, stashed {}",
- count,
- TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime),
- stash.size)
+ log.debug(
+ "Loading {} entries from durable store took {} ms, stashed {}",
+ count,
+ TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime),
+ stash.size)
context.become(normalReceive)
unstashAll()
self ! FlushChanges
@@ -1429,10 +1454,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def isLocalSender(): Boolean = !replyTo.path.address.hasGlobalScope
- def receiveUpdate(key: KeyR,
- modify: Option[ReplicatedData] => ReplicatedData,
- writeConsistency: WriteConsistency,
- req: Option[Any]): Unit = {
+ def receiveUpdate(
+ key: KeyR,
+ modify: Option[ReplicatedData] => ReplicatedData,
+ writeConsistency: WriteConsistency,
+ req: Option[Any]): Unit = {
val localValue = getData(key.id)
def deltaOrPlaceholder(d: DeltaReplicatedData): Option[ReplicatedDelta] = {
@@ -1476,9 +1502,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val durable = isDurable(key.id)
if (isLocalUpdate(writeConsistency)) {
if (durable)
- durableStore ! Store(key.id,
- new DurableDataEnvelope(newEnvelope),
- Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), replyTo)))
+ durableStore ! Store(
+ key.id,
+ new DurableDataEnvelope(newEnvelope),
+ Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), replyTo)))
else
replyTo ! UpdateSuccess(key, req)
} else {
@@ -1496,9 +1523,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
.props(key, writeEnvelope, writeDelta, writeConsistency, req, nodes, unreachable, replyTo, durable)
.withDispatcher(context.props.dispatcher))
if (durable) {
- durableStore ! Store(key.id,
- new DurableDataEnvelope(newEnvelope),
- Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), writeAggregator)))
+ durableStore ! Store(
+ key.id,
+ new DurableDataEnvelope(newEnvelope),
+ Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), writeAggregator)))
}
}
case Failure(e: DataDeleted[_]) =>
@@ -1591,9 +1619,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val durable = isDurable(key.id)
if (isLocalUpdate(consistency)) {
if (durable)
- durableStore ! Store(key.id,
- new DurableDataEnvelope(DeletedEnvelope),
- Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), replyTo)))
+ durableStore ! Store(
+ key.id,
+ new DurableDataEnvelope(DeletedEnvelope),
+ Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), replyTo)))
else
replyTo ! DeleteSuccess(key, req)
} else {
@@ -1603,9 +1632,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
.props(key, DeletedEnvelope, None, consistency, req, nodes, unreachable, replyTo, durable)
.withDispatcher(context.props.dispatcher))
if (durable) {
- durableStore ! Store(key.id,
- new DurableDataEnvelope(DeletedEnvelope),
- Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), writeAggregator)))
+ durableStore ! Store(
+ key.id,
+ new DurableDataEnvelope(DeletedEnvelope),
+ Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), writeAggregator)))
}
}
}
@@ -1719,11 +1749,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
try {
val isDebugEnabled = log.isDebugEnabled
if (isDebugEnabled)
- log.debug("Received DeltaPropagation from [{}], containing [{}]",
- fromNode.address,
- deltas
- .collect { case (key, Delta(_, fromSeqNr, toSeqNr)) => s"$key $fromSeqNr-$toSeqNr" }
- .mkString(", "))
+ log.debug(
+ "Received DeltaPropagation from [{}], containing [{}]",
+ fromNode.address,
+ deltas.collect { case (key, Delta(_, fromSeqNr, toSeqNr)) => s"$key $fromSeqNr-$toSeqNr" }.mkString(", "))
if (isNodeRemoved(fromNode, deltas.keys)) {
// Late message from a removed node.
@@ -1736,27 +1765,30 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val currentSeqNr = getDeltaSeqNr(key, fromNode)
if (currentSeqNr >= toSeqNr) {
if (isDebugEnabled)
- log.debug("Skipping DeltaPropagation from [{}] for [{}] because toSeqNr [{}] already handled [{}]",
- fromNode.address,
- key,
- toSeqNr,
- currentSeqNr)
+ log.debug(
+ "Skipping DeltaPropagation from [{}] for [{}] because toSeqNr [{}] already handled [{}]",
+ fromNode.address,
+ key,
+ toSeqNr,
+ currentSeqNr)
if (reply) replyTo ! WriteAck
} else if (fromSeqNr > (currentSeqNr + 1)) {
if (isDebugEnabled)
- log.debug("Skipping DeltaPropagation from [{}] for [{}] because missing deltas between [{}-{}]",
- fromNode.address,
- key,
- currentSeqNr + 1,
- fromSeqNr - 1)
+ log.debug(
+ "Skipping DeltaPropagation from [{}] for [{}] because missing deltas between [{}-{}]",
+ fromNode.address,
+ key,
+ currentSeqNr + 1,
+ fromSeqNr - 1)
if (reply) replyTo ! DeltaNack
} else {
if (isDebugEnabled)
- log.debug("Applying DeltaPropagation from [{}] for [{}] with sequence numbers [{}], current was [{}]",
- fromNode.address,
- key,
- s"$fromSeqNr-$toSeqNr",
- currentSeqNr)
+ log.debug(
+ "Applying DeltaPropagation from [{}] for [{}] with sequence numbers [{}], current was [{}]",
+ fromNode.address,
+ key,
+ s"$fromSeqNr-$toSeqNr",
+ currentSeqNr)
val newEnvelope = envelope.copy(deltaVersions = VersionVector(fromNode, toSeqNr))
writeAndStore(key, newEnvelope, reply)
}
@@ -1812,11 +1844,12 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def receiveStatus(otherDigests: Map[KeyId, Digest], chunk: Int, totChunks: Int): Unit = {
if (log.isDebugEnabled)
- log.debug("Received gossip status from [{}], chunk [{}] of [{}] containing [{}]",
- replyTo.path.address,
- (chunk + 1),
- totChunks,
- otherDigests.keys.mkString(", "))
+ log.debug(
+ "Received gossip status from [{}], chunk [{}] of [{}] containing [{}]",
+ replyTo.path.address,
+ (chunk + 1),
+ totChunks,
+ otherDigests.keys.mkString(", "))
def isOtherDifferent(key: KeyId, otherDigest: Digest): Boolean = {
val d = getDigest(key)
@@ -1840,9 +1873,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val myMissingKeys = otherKeys.diff(myKeys)
if (myMissingKeys.nonEmpty) {
if (log.isDebugEnabled)
- log.debug("Sending gossip status to [{}], requesting missing [{}]",
- replyTo.path.address,
- myMissingKeys.mkString(", "))
+ log.debug(
+ "Sending gossip status to [{}], requesting missing [{}]",
+ replyTo.path.address,
+ myMissingKeys.mkString(", "))
val status = Status(myMissingKeys.iterator.map(k => k -> NotFoundDigest).toMap, chunk, totChunks)
replyTo ! status
}
@@ -2131,15 +2165,16 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
* INTERNAL API
*/
@InternalApi private[akka] object WriteAggregator {
- def props(key: KeyR,
- envelope: Replicator.Internal.DataEnvelope,
- delta: Option[Replicator.Internal.Delta],
- consistency: Replicator.WriteConsistency,
- req: Option[Any],
- nodes: Set[Address],
- unreachable: Set[Address],
- replyTo: ActorRef,
- durable: Boolean): Props =
+ def props(
+ key: KeyR,
+ envelope: Replicator.Internal.DataEnvelope,
+ delta: Option[Replicator.Internal.Delta],
+ consistency: Replicator.WriteConsistency,
+ req: Option[Any],
+ nodes: Set[Address],
+ unreachable: Set[Address],
+ replyTo: ActorRef,
+ durable: Boolean): Props =
Props(new WriteAggregator(key, envelope, delta, consistency, req, nodes, unreachable, replyTo, durable))
.withDeploy(Deploy.local)
}
@@ -2147,15 +2182,16 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
/**
* INTERNAL API
*/
-@InternalApi private[akka] class WriteAggregator(key: KeyR,
- envelope: Replicator.Internal.DataEnvelope,
- delta: Option[Replicator.Internal.Delta],
- consistency: Replicator.WriteConsistency,
- req: Option[Any],
- override val nodes: Set[Address],
- override val unreachable: Set[Address],
- replyTo: ActorRef,
- durable: Boolean)
+@InternalApi private[akka] class WriteAggregator(
+ key: KeyR,
+ envelope: Replicator.Internal.DataEnvelope,
+ delta: Option[Replicator.Internal.Delta],
+ consistency: Replicator.WriteConsistency,
+ req: Option[Any],
+ override val nodes: Set[Address],
+ override val unreachable: Set[Address],
+ replyTo: ActorRef,
+ durable: Boolean)
extends ReadWriteAggregator {
import Replicator._
@@ -2270,13 +2306,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
* INTERNAL API
*/
@InternalApi private[akka] object ReadAggregator {
- def props(key: KeyR,
- consistency: Replicator.ReadConsistency,
- req: Option[Any],
- nodes: Set[Address],
- unreachable: Set[Address],
- localValue: Option[Replicator.Internal.DataEnvelope],
- replyTo: ActorRef): Props =
+ def props(
+ key: KeyR,
+ consistency: Replicator.ReadConsistency,
+ req: Option[Any],
+ nodes: Set[Address],
+ unreachable: Set[Address],
+ localValue: Option[Replicator.Internal.DataEnvelope],
+ replyTo: ActorRef): Props =
Props(new ReadAggregator(key, consistency, req, nodes, unreachable, localValue, replyTo)).withDeploy(Deploy.local)
}
@@ -2284,13 +2321,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
/**
* INTERNAL API
*/
-@InternalApi private[akka] class ReadAggregator(key: KeyR,
- consistency: Replicator.ReadConsistency,
- req: Option[Any],
- override val nodes: Set[Address],
- override val unreachable: Set[Address],
- localValue: Option[Replicator.Internal.DataEnvelope],
- replyTo: ActorRef)
+@InternalApi private[akka] class ReadAggregator(
+ key: KeyR,
+ consistency: Replicator.ReadConsistency,
+ req: Option[Any],
+ override val nodes: Set[Address],
+ override val unreachable: Set[Address],
+ localValue: Option[Replicator.Internal.DataEnvelope],
+ replyTo: ActorRef)
extends ReadWriteAggregator {
import Replicator._
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
index 5a7fe67b79..5fef03878e 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
@@ -182,9 +182,10 @@ sealed abstract class VersionVector extends ReplicatedData with ReplicatedDataSe
private final def compareOnlyTo(that: VersionVector, order: Ordering): Ordering = {
def nextOrElse[A](iter: Iterator[A], default: A): A = if (iter.hasNext) iter.next() else default
- def compare(i1: Iterator[(UniqueAddress, Long)],
- i2: Iterator[(UniqueAddress, Long)],
- requestedOrder: Ordering): Ordering = {
+ def compare(
+ i1: Iterator[(UniqueAddress, Long)],
+ i2: Iterator[(UniqueAddress, Long)],
+ requestedOrder: Ordering): Ordering = {
@tailrec
def compareNext(nt1: (UniqueAddress, Long), nt2: (UniqueAddress, Long), currentOrder: Ordering): Ordering =
if ((requestedOrder ne FullOrder) && (currentOrder ne Same) && (currentOrder ne requestedOrder)) currentOrder
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
index 5c3ee1ebed..10f0e7a396 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
@@ -115,9 +115,10 @@ private object ReplicatedDataSerializer {
builder.setLongKey(key).setValue(value).build()
override def setIntKey(builder: rd.ORMap.Entry.Builder, key: Int, value: dm.OtherMessage): rd.ORMap.Entry =
builder.setIntKey(key).setValue(value).build()
- override def setOtherKey(builder: rd.ORMap.Entry.Builder,
- key: dm.OtherMessage,
- value: dm.OtherMessage): rd.ORMap.Entry = builder.setOtherKey(key).setValue(value).build()
+ override def setOtherKey(
+ builder: rd.ORMap.Entry.Builder,
+ key: dm.OtherMessage,
+ value: dm.OtherMessage): rd.ORMap.Entry = builder.setOtherKey(key).setValue(value).build()
override def hasStringKey(entry: rd.ORMap.Entry): Boolean = entry.hasStringKey
override def getStringKey(entry: rd.ORMap.Entry): String = entry.getStringKey
override def hasIntKey(entry: rd.ORMap.Entry): Boolean = entry.hasIntKey
@@ -138,9 +139,10 @@ private object ReplicatedDataSerializer {
builder.setLongKey(key).setValue(value).build()
override def setIntKey(builder: rd.LWWMap.Entry.Builder, key: Int, value: rd.LWWRegister): rd.LWWMap.Entry =
builder.setIntKey(key).setValue(value).build()
- override def setOtherKey(builder: rd.LWWMap.Entry.Builder,
- key: OtherMessage,
- value: rd.LWWRegister): rd.LWWMap.Entry = builder.setOtherKey(key).setValue(value).build()
+ override def setOtherKey(
+ builder: rd.LWWMap.Entry.Builder,
+ key: OtherMessage,
+ value: rd.LWWRegister): rd.LWWMap.Entry = builder.setOtherKey(key).setValue(value).build()
override def hasStringKey(entry: rd.LWWMap.Entry): Boolean = entry.hasStringKey
override def getStringKey(entry: rd.LWWMap.Entry): String = entry.getStringKey
override def hasIntKey(entry: rd.LWWMap.Entry): Boolean = entry.hasIntKey
@@ -155,20 +157,24 @@ private object ReplicatedDataSerializer {
implicit object PNCounterMapEntry
extends ProtoMapEntryWriter[rd.PNCounterMap.Entry, rd.PNCounterMap.Entry.Builder, rd.PNCounter]
with ProtoMapEntryReader[rd.PNCounterMap.Entry, rd.PNCounter] {
- override def setStringKey(builder: rd.PNCounterMap.Entry.Builder,
- key: String,
- value: rd.PNCounter): rd.PNCounterMap.Entry =
+ override def setStringKey(
+ builder: rd.PNCounterMap.Entry.Builder,
+ key: String,
+ value: rd.PNCounter): rd.PNCounterMap.Entry =
builder.setStringKey(key).setValue(value).build()
- override def setLongKey(builder: rd.PNCounterMap.Entry.Builder,
- key: Long,
- value: rd.PNCounter): rd.PNCounterMap.Entry =
+ override def setLongKey(
+ builder: rd.PNCounterMap.Entry.Builder,
+ key: Long,
+ value: rd.PNCounter): rd.PNCounterMap.Entry =
builder.setLongKey(key).setValue(value).build()
- override def setIntKey(builder: rd.PNCounterMap.Entry.Builder,
- key: Int,
- value: rd.PNCounter): rd.PNCounterMap.Entry = builder.setIntKey(key).setValue(value).build()
- override def setOtherKey(builder: rd.PNCounterMap.Entry.Builder,
- key: OtherMessage,
- value: rd.PNCounter): rd.PNCounterMap.Entry =
+ override def setIntKey(
+ builder: rd.PNCounterMap.Entry.Builder,
+ key: Int,
+ value: rd.PNCounter): rd.PNCounterMap.Entry = builder.setIntKey(key).setValue(value).build()
+ override def setOtherKey(
+ builder: rd.PNCounterMap.Entry.Builder,
+ key: OtherMessage,
+ value: rd.PNCounter): rd.PNCounterMap.Entry =
builder.setOtherKey(key).setValue(value).build()
override def hasStringKey(entry: rd.PNCounterMap.Entry): Boolean = entry.hasStringKey
override def getStringKey(entry: rd.PNCounterMap.Entry): String = entry.getStringKey
@@ -190,9 +196,10 @@ private object ReplicatedDataSerializer {
builder.setLongKey(key).setValue(value).build()
override def setIntKey(builder: rd.ORMultiMap.Entry.Builder, key: Int, value: rd.ORSet): rd.ORMultiMap.Entry =
builder.setIntKey(key).setValue(value).build()
- override def setOtherKey(builder: rd.ORMultiMap.Entry.Builder,
- key: dm.OtherMessage,
- value: rd.ORSet): rd.ORMultiMap.Entry = builder.setOtherKey(key).setValue(value).build()
+ override def setOtherKey(
+ builder: rd.ORMultiMap.Entry.Builder,
+ key: dm.OtherMessage,
+ value: rd.ORSet): rd.ORMultiMap.Entry = builder.setOtherKey(key).setValue(value).build()
override def hasStringKey(entry: rd.ORMultiMap.Entry): Boolean = entry.hasStringKey
override def getStringKey(entry: rd.ORMultiMap.Entry): String = entry.getStringKey
override def hasIntKey(entry: rd.ORMultiMap.Entry): Boolean = entry.hasIntKey
@@ -207,21 +214,25 @@ private object ReplicatedDataSerializer {
implicit object ORMapDeltaGroupEntry
extends ProtoMapEntryWriter[rd.ORMapDeltaGroup.MapEntry, rd.ORMapDeltaGroup.MapEntry.Builder, dm.OtherMessage]
with ProtoMapEntryReader[rd.ORMapDeltaGroup.MapEntry, dm.OtherMessage] {
- override def setStringKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder,
- key: String,
- value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
+ override def setStringKey(
+ builder: rd.ORMapDeltaGroup.MapEntry.Builder,
+ key: String,
+ value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
builder.setStringKey(key).setValue(value).build()
- override def setLongKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder,
- key: Long,
- value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
+ override def setLongKey(
+ builder: rd.ORMapDeltaGroup.MapEntry.Builder,
+ key: Long,
+ value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
builder.setLongKey(key).setValue(value).build()
- override def setIntKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder,
- key: Int,
- value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
+ override def setIntKey(
+ builder: rd.ORMapDeltaGroup.MapEntry.Builder,
+ key: Int,
+ value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
builder.setIntKey(key).setValue(value).build()
- override def setOtherKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder,
- key: dm.OtherMessage,
- value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
+ override def setOtherKey(
+ builder: rd.ORMapDeltaGroup.MapEntry.Builder,
+ key: dm.OtherMessage,
+ value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry =
builder.setOtherKey(key).setValue(value).build()
override def hasStringKey(entry: rd.ORMapDeltaGroup.MapEntry): Boolean = entry.hasStringKey
override def getStringKey(entry: rd.ORMapDeltaGroup.MapEntry): String = entry.getStringKey
@@ -590,9 +601,10 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
lwwRegisterFromProto(rd.LWWRegister.parseFrom(bytes))
def lwwRegisterFromProto(lwwRegister: rd.LWWRegister): LWWRegister[Any] =
- new LWWRegister(uniqueAddressFromProto(lwwRegister.getNode),
- otherMessageFromProto(lwwRegister.getState),
- lwwRegister.getTimestamp)
+ new LWWRegister(
+ uniqueAddressFromProto(lwwRegister.getNode),
+ otherMessageFromProto(lwwRegister.getState),
+ lwwRegister.getTimestamp)
def gcounterToProto(gcounter: GCounter): rd.GCounter = {
val b = rd.GCounter.newBuilder()
@@ -628,20 +640,23 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
pncounterFromProto(rd.PNCounter.parseFrom(bytes))
def pncounterFromProto(pncounter: rd.PNCounter): PNCounter = {
- new PNCounter(increments = gcounterFromProto(pncounter.getIncrements),
- decrements = gcounterFromProto(pncounter.getDecrements))
+ new PNCounter(
+ increments = gcounterFromProto(pncounter.getIncrements),
+ decrements = gcounterFromProto(pncounter.getDecrements))
}
/*
* Convert a Map[A, B] to an Iterable[Entry] where Entry is the protobuf map entry.
*/
- private def getEntries[IKey,
- IValue,
- EntryBuilder <: GeneratedMessage.Builder[EntryBuilder],
- PEntry <: GeneratedMessage,
- PValue <: GeneratedMessage](input: Map[IKey, IValue],
- createBuilder: () => EntryBuilder,
- valueConverter: IValue => PValue)(
+ private def getEntries[
+ IKey,
+ IValue,
+ EntryBuilder <: GeneratedMessage.Builder[EntryBuilder],
+ PEntry <: GeneratedMessage,
+ PValue <: GeneratedMessage](
+ input: Map[IKey, IValue],
+ createBuilder: () => EntryBuilder,
+ valueConverter: IValue => PValue)(
implicit comparator: Comparator[PEntry],
eh: ProtoMapEntryWriter[PEntry, EntryBuilder, PValue]): java.lang.Iterable[PEntry] = {
// The resulting Iterable needs to be ordered deterministically in order to create same signature upon serializing same data
@@ -681,8 +696,9 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
}
def ormapFromProto(ormap: rd.ORMap): ORMap[Any, ReplicatedData] = {
- val entries = mapTypeFromProto(ormap.getEntriesList,
- (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData])
+ val entries = mapTypeFromProto(
+ ormap.getEntriesList,
+ (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData])
new ORMap(keys = orsetFromProto(ormap.getKeys), entries, ORMap.VanillaORMapTag)
}
@@ -766,25 +782,31 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
.map { entry =>
if (entry.getOperation == rd.ORMapDeltaOp.ORMapPut) {
val map =
- singleMapEntryFromProto(entry.getEntryDataList,
- (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData])
- ORMap.PutDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)),
- map.head,
- zeroTagFromCode(entry.getZeroTag))
+ singleMapEntryFromProto(
+ entry.getEntryDataList,
+ (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData])
+ ORMap.PutDeltaOp(
+ ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)),
+ map.head,
+ zeroTagFromCode(entry.getZeroTag))
} else if (entry.getOperation == rd.ORMapDeltaOp.ORMapRemove) {
- ORMap.RemoveDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)),
- zeroTagFromCode(entry.getZeroTag))
+ ORMap.RemoveDeltaOp(
+ ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)),
+ zeroTagFromCode(entry.getZeroTag))
} else if (entry.getOperation == rd.ORMapDeltaOp.ORMapRemoveKey) {
val elem = singleKeyEntryFromProto(entry.getEntryDataList.asScala.headOption)
- ORMap.RemoveKeyDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)),
- elem,
- zeroTagFromCode(entry.getZeroTag))
+ ORMap.RemoveKeyDeltaOp(
+ ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)),
+ elem,
+ zeroTagFromCode(entry.getZeroTag))
} else if (entry.getOperation == rd.ORMapDeltaOp.ORMapUpdate) {
- val map = mapTypeFromProto(entry.getEntryDataList,
- (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedDelta])
- ORMap.UpdateDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)),
- map,
- zeroTagFromCode(entry.getZeroTag))
+ val map = mapTypeFromProto(
+ entry.getEntryDataList,
+ (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedDelta])
+ ORMap.UpdateDeltaOp(
+ ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)),
+ map,
+ zeroTagFromCode(entry.getZeroTag))
} else
throw new NotSerializableException(s"Unknown ORMap delta operation ${entry.getOperation}")
}
@@ -862,16 +884,18 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
createEntry(rd.ORMapDeltaOp.ORMapPut, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, Map(pair), zt.value))
case ORMap.RemoveDeltaOp(op, zt) =>
b.addEntries(
- createEntry(rd.ORMapDeltaOp.ORMapRemove,
- op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying,
- Map.empty,
- zt.value))
+ createEntry(
+ rd.ORMapDeltaOp.ORMapRemove,
+ op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying,
+ Map.empty,
+ zt.value))
case ORMap.RemoveKeyDeltaOp(op, k, zt) =>
b.addEntries(
- createEntryWithKey(rd.ORMapDeltaOp.ORMapRemoveKey,
- op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying,
- k,
- zt.value))
+ createEntryWithKey(
+ rd.ORMapDeltaOp.ORMapRemoveKey,
+ op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying,
+ k,
+ zt.value))
case ORMap.UpdateDeltaOp(op, m, zt) =>
b.addEntries(
createEntry(rd.ORMapDeltaOp.ORMapUpdate, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, m, zt.value))
@@ -930,13 +954,15 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
if (multimap.hasWithValueDeltas)
multimap.getWithValueDeltas
else false
- new ORMultiMap(new ORMap(keys = orsetFromProto(multimap.getKeys),
- entries,
- if (withValueDeltas)
- ORMultiMap.ORMultiMapWithValueDeltasTag
- else
- ORMultiMap.ORMultiMapTag),
- withValueDeltas)
+ new ORMultiMap(
+ new ORMap(
+ keys = orsetFromProto(multimap.getKeys),
+ entries,
+ if (withValueDeltas)
+ ORMultiMap.ORMultiMapWithValueDeltasTag
+ else
+ ORMultiMap.ORMultiMapTag),
+ withValueDeltas)
}
def keyIdToBinary(id: String): Array[Byte] =
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
index 808797c3e4..f44eb0fd2d 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
@@ -184,27 +184,25 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
val DeltaPropagationManifest = "Q"
val DeltaNackManifest = "R"
- private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](GetManifest -> getFromBinary,
- GetSuccessManifest -> getSuccessFromBinary,
- NotFoundManifest -> notFoundFromBinary,
- GetFailureManifest -> getFailureFromBinary,
- SubscribeManifest -> subscribeFromBinary,
- UnsubscribeManifest -> unsubscribeFromBinary,
- ChangedManifest -> changedFromBinary,
- DataEnvelopeManifest -> dataEnvelopeFromBinary,
- WriteManifest -> writeFromBinary,
- WriteAckManifest -> (_ =>
- WriteAck),
- ReadManifest -> readFromBinary,
- ReadResultManifest -> readResultFromBinary,
- StatusManifest -> statusFromBinary,
- GossipManifest -> gossipFromBinary,
- DeltaPropagationManifest -> deltaPropagationFromBinary,
- WriteNackManifest -> (_ =>
- WriteNack),
- DeltaNackManifest -> (_ =>
- DeltaNack),
- DurableDataEnvelopeManifest -> durableDataEnvelopeFromBinary)
+ private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](
+ GetManifest -> getFromBinary,
+ GetSuccessManifest -> getSuccessFromBinary,
+ NotFoundManifest -> notFoundFromBinary,
+ GetFailureManifest -> getFailureFromBinary,
+ SubscribeManifest -> subscribeFromBinary,
+ UnsubscribeManifest -> unsubscribeFromBinary,
+ ChangedManifest -> changedFromBinary,
+ DataEnvelopeManifest -> dataEnvelopeFromBinary,
+ WriteManifest -> writeFromBinary,
+ WriteAckManifest -> (_ => WriteAck),
+ ReadManifest -> readFromBinary,
+ ReadResultManifest -> readResultFromBinary,
+ StatusManifest -> statusFromBinary,
+ GossipManifest -> gossipFromBinary,
+ DeltaPropagationManifest -> deltaPropagationFromBinary,
+ WriteNackManifest -> (_ => WriteNack),
+ DeltaNackManifest -> (_ => DeltaNack),
+ DurableDataEnvelopeManifest -> durableDataEnvelopeFromBinary)
override def manifest(obj: AnyRef): String = obj match {
case _: DataEnvelope => DataEnvelopeManifest
@@ -272,9 +270,10 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def statusFromBinary(bytes: Array[Byte]): Status = {
val status = dm.Status.parseFrom(bytes)
- Status(status.getEntriesList.asScala.iterator.map(e => e.getKey -> AkkaByteString(e.getDigest.toByteArray())).toMap,
- status.getChunk,
- status.getTotChunks)
+ Status(
+ status.getEntriesList.asScala.iterator.map(e => e.getKey -> AkkaByteString(e.getDigest.toByteArray())).toMap,
+ status.getChunk,
+ status.getTotChunks)
}
private def gossipToProto(gossip: Gossip): dm.Gossip = {
@@ -288,8 +287,9 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def gossipFromBinary(bytes: Array[Byte]): Gossip = {
val gossip = dm.Gossip.parseFrom(decompress(bytes))
- Gossip(gossip.getEntriesList.asScala.iterator.map(e => e.getKey -> dataEnvelopeFromProto(e.getEnvelope)).toMap,
- sendBack = gossip.getSendBack)
+ Gossip(
+ gossip.getEntriesList.asScala.iterator.map(e => e.getKey -> dataEnvelopeFromProto(e.getEnvelope)).toMap,
+ sendBack = gossip.getSendBack)
}
private def deltaPropagationToProto(deltaPropagation: DeltaPropagation): dm.DeltaPropagation = {
@@ -313,13 +313,14 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def deltaPropagationFromBinary(bytes: Array[Byte]): DeltaPropagation = {
val deltaPropagation = dm.DeltaPropagation.parseFrom(bytes)
val reply = deltaPropagation.hasReply && deltaPropagation.getReply
- DeltaPropagation(uniqueAddressFromProto(deltaPropagation.getFromNode),
- reply,
- deltaPropagation.getEntriesList.asScala.iterator.map { e =>
- val fromSeqNr = e.getFromSeqNr
- val toSeqNr = if (e.hasToSeqNr) e.getToSeqNr else fromSeqNr
- e.getKey -> Delta(dataEnvelopeFromProto(e.getEnvelope), fromSeqNr, toSeqNr)
- }.toMap)
+ DeltaPropagation(
+ uniqueAddressFromProto(deltaPropagation.getFromNode),
+ reply,
+ deltaPropagation.getEntriesList.asScala.iterator.map { e =>
+ val fromSeqNr = e.getFromSeqNr
+ val toSeqNr = if (e.hasToSeqNr) e.getToSeqNr else fromSeqNr
+ e.getKey -> Delta(dataEnvelopeFromProto(e.getEnvelope), fromSeqNr, toSeqNr)
+ }.toMap)
}
private def getToProto(get: Get[_]): dm.Get = {
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala
index ba1478d196..5785cc26f1 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala
@@ -98,8 +98,9 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig)
}
def newReplicator(sys: ActorSystem = system) =
- sys.actorOf(Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second)),
- "replicator-" + testStepCounter)
+ sys.actorOf(
+ Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second)),
+ "replicator-" + testStepCounter)
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
@@ -275,9 +276,10 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig)
Await.ready(sys1.terminate(), 10.seconds)
}
- val sys2 = ActorSystem("AdditionalSys",
- // use the same port
- ConfigFactory.parseString(s"""
+ val sys2 = ActorSystem(
+ "AdditionalSys",
+ // use the same port
+ ConfigFactory.parseString(s"""
akka.remote.artery.canonical.port = ${address.port.get}
akka.remote.netty.tcp.port = ${address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala
index ddc67c7a8f..de70cd1d64 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala
@@ -49,11 +49,12 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN
val maxPruningDissemination = 3.seconds
def startReplicator(sys: ActorSystem): ActorRef =
- sys.actorOf(Replicator.props(
- ReplicatorSettings(sys)
- .withGossipInterval(1.second)
- .withPruning(pruningInterval = 1.second, maxPruningDissemination)),
- "replicator")
+ sys.actorOf(
+ Replicator.props(
+ ReplicatorSettings(sys)
+ .withGossipInterval(1.second)
+ .withPruning(pruningInterval = 1.second, maxPruningDissemination)),
+ "replicator")
val replicator = startReplicator(system)
val timeout = 5.seconds.dilated
@@ -152,8 +153,9 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN
runOn(first) {
val address = cluster2.selfAddress
- val sys3 = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val sys3 = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.artery.canonical.port = ${address.port.get}
akka.remote.netty.tcp.port = ${address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
index b5a175bd57..ee83f1ddd1 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
@@ -83,12 +83,14 @@ class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpe
enterBarrier(from.name + "-joined")
}
- def repeat(description: String,
- keys: Iterable[ORSetKey[Int]],
- n: Int,
- expectedAfterReplication: Option[Set[Int]] = None,
- oneByOne: Boolean = false)(block: (ORSetKey[Int], Int, ActorRef) => Unit,
- afterEachKey: ORSetKey[Int] => Unit = _ => ()): Unit = {
+ def repeat(
+ description: String,
+ keys: Iterable[ORSetKey[Int]],
+ n: Int,
+ expectedAfterReplication: Option[Set[Int]] = None,
+ oneByOne: Boolean = false)(
+ block: (ORSetKey[Int], Int, ActorRef) => Unit,
+ afterEachKey: ORSetKey[Int] => Unit = _ => ()): Unit = {
keys.foreach { key =>
val startTime = System.nanoTime()
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala
index da2e8f1658..eae37567cb 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala
@@ -41,11 +41,12 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST
val cluster = Cluster(system)
implicit val selfUniqueAddress = DistributedData(system).selfUniqueAddress
val maxPruningDissemination = 3.seconds
- val replicator = system.actorOf(Replicator.props(
- ReplicatorSettings(system)
- .withGossipInterval(1.second)
- .withPruning(pruningInterval = 1.second, maxPruningDissemination)),
- "replicator")
+ val replicator = system.actorOf(
+ Replicator.props(
+ ReplicatorSettings(system)
+ .withGossipInterval(1.second)
+ .withPruning(pruningInterval = 1.second, maxPruningDissemination)),
+ "replicator")
val timeout = 3.seconds.dilated
val KeyA = GCounterKey("A")
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
index e5744e2348..dfd3e3d88b 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
@@ -53,9 +53,10 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
selector.hasDeltaEntries("A") should ===(true)
selector.hasDeltaEntries("B") should ===(true)
val expected =
- DeltaPropagation(selfUniqueAddress,
- false,
- Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L)))
+ DeltaPropagation(
+ selfUniqueAddress,
+ false,
+ Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(0) -> expected))
selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation])
selector.cleanupDeltaEntries()
@@ -68,9 +69,10 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
selector.update("A", deltaA)
selector.update("B", deltaB)
val expected =
- DeltaPropagation(selfUniqueAddress,
- false,
- Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L)))
+ DeltaPropagation(
+ selfUniqueAddress,
+ false,
+ Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(0) -> expected, nodes(1) -> expected))
selector.cleanupDeltaEntries()
selector.hasDeltaEntries("A") should ===(true)
@@ -87,17 +89,20 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
selector.update("A", deltaA)
selector.update("B", deltaB)
val expected1 =
- DeltaPropagation(selfUniqueAddress,
- false,
- Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L)))
+ DeltaPropagation(
+ selfUniqueAddress,
+ false,
+ Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(0) -> expected1, nodes(1) -> expected1))
// new update before previous was propagated to all nodes
selector.update("C", deltaC)
- val expected2 = DeltaPropagation(selfUniqueAddress,
- false,
- Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L),
- "B" -> Delta(DataEnvelope(deltaB), 1L, 1L),
- "C" -> Delta(DataEnvelope(deltaC), 1L, 1L)))
+ val expected2 = DeltaPropagation(
+ selfUniqueAddress,
+ false,
+ Map(
+ "A" -> Delta(DataEnvelope(deltaA), 1L, 1L),
+ "B" -> Delta(DataEnvelope(deltaB), 1L, 1L),
+ "C" -> Delta(DataEnvelope(deltaC), 1L, 1L)))
val expected3 = DeltaPropagation(selfUniqueAddress, false, Map("C" -> Delta(DataEnvelope(deltaC), 1L, 1L)))
selector.collectPropagations() should ===(Map(nodes(2) -> expected2, nodes(0) -> expected3))
selector.cleanupDeltaEntries()
@@ -146,9 +151,10 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
selector.collectPropagations() should ===(Map(nodes(1) -> expected2))
selector.update("A", delta3)
- val expected3 = DeltaPropagation(selfUniqueAddress,
- false,
- Map("A" -> Delta(DataEnvelope(delta1.merge(delta2).merge(delta3)), 1L, 3L)))
+ val expected3 = DeltaPropagation(
+ selfUniqueAddress,
+ false,
+ Map("A" -> Delta(DataEnvelope(delta1.merge(delta2).merge(delta3)), 1L, 3L)))
selector.collectPropagations() should ===(Map(nodes(2) -> expected3))
val expected4 =
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
index 87740abc0c..6713944992 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
@@ -47,8 +47,9 @@ class LocalConcurrencySpec(_system: ActorSystem)
def this() {
this(
- ActorSystem("LocalConcurrencySpec",
- ConfigFactory.parseString("""
+ ActorSystem(
+ "LocalConcurrencySpec",
+ ConfigFactory.parseString("""
akka.actor.provider = "cluster"
akka.remote.netty.tcp.port=0
akka.remote.artery.canonical.port = 0
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
index 24a7c82ad4..d730d8569d 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
@@ -511,9 +511,10 @@ class ORSetSpec extends WordSpec with Matchers {
"verify mergeDisjointKeys" in {
val keys: Set[Any] = Set("K3", "K4", "K5")
- val elements: Map[Any, VersionVector] = Map("K3" -> VersionVector(nodeA, 4L),
- "K4" -> VersionVector(TreeMap(nodeA -> 3L, nodeD -> 8L)),
- "K5" -> VersionVector(nodeA, 2L))
+ val elements: Map[Any, VersionVector] = Map(
+ "K3" -> VersionVector(nodeA, 4L),
+ "K4" -> VersionVector(TreeMap(nodeA -> 3L, nodeD -> 8L)),
+ "K5" -> VersionVector(nodeA, 2L))
val vvector = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L))
val acc: Map[Any, VersionVector] = Map("K1" -> VersionVector(nodeA, 3L))
val expectedDots = acc ++ Map("K3" -> VersionVector(nodeA, 4L), "K4" -> VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala
index 4196c63f42..c2fa51d70b 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala
@@ -24,34 +24,37 @@ object WriteAggregatorSpec {
val KeyA = GSetKey[String]("A")
val KeyB = ORSetKey[String]("B")
- def writeAggregatorProps(data: GSet[String],
- consistency: Replicator.WriteConsistency,
- probes: Map[Address, ActorRef],
- nodes: Set[Address],
- unreachable: Set[Address],
- replyTo: ActorRef,
- durable: Boolean): Props =
+ def writeAggregatorProps(
+ data: GSet[String],
+ consistency: Replicator.WriteConsistency,
+ probes: Map[Address, ActorRef],
+ nodes: Set[Address],
+ unreachable: Set[Address],
+ replyTo: ActorRef,
+ durable: Boolean): Props =
Props(new TestWriteAggregator(KeyA, data, None, consistency, probes, nodes, unreachable, replyTo, durable))
- def writeAggregatorPropsWithDelta(data: ORSet[String],
- delta: Delta,
- consistency: Replicator.WriteConsistency,
- probes: Map[Address, ActorRef],
- nodes: Set[Address],
- unreachable: Set[Address],
- replyTo: ActorRef,
- durable: Boolean): Props =
+ def writeAggregatorPropsWithDelta(
+ data: ORSet[String],
+ delta: Delta,
+ consistency: Replicator.WriteConsistency,
+ probes: Map[Address, ActorRef],
+ nodes: Set[Address],
+ unreachable: Set[Address],
+ replyTo: ActorRef,
+ durable: Boolean): Props =
Props(new TestWriteAggregator(KeyB, data, Some(delta), consistency, probes, nodes, unreachable, replyTo, durable))
- class TestWriteAggregator(key: Key.KeyR,
- data: ReplicatedData,
- delta: Option[Delta],
- consistency: Replicator.WriteConsistency,
- probes: Map[Address, ActorRef],
- nodes: Set[Address],
- unreachable: Set[Address],
- replyTo: ActorRef,
- durable: Boolean)
+ class TestWriteAggregator(
+ key: Key.KeyR,
+ data: ReplicatedData,
+ delta: Option[Delta],
+ consistency: Replicator.WriteConsistency,
+ probes: Map[Address, ActorRef],
+ nodes: Set[Address],
+ unreachable: Set[Address],
+ replyTo: ActorRef,
+ durable: Boolean)
extends WriteAggregator(key, DataEnvelope(data), delta, consistency, None, nodes, unreachable, replyTo, durable) {
override def replica(address: Address): ActorSelection =
@@ -215,14 +218,15 @@ class WriteAggregatorSpec extends AkkaSpec(s"""
"send deltas first" in {
val probe = TestProbe()
val aggr = system.actorOf(
- WriteAggregatorSpec.writeAggregatorPropsWithDelta(fullState2,
- delta,
- writeMajority,
- probes(probe.ref),
- nodes,
- Set.empty,
- testActor,
- durable = false))
+ WriteAggregatorSpec.writeAggregatorPropsWithDelta(
+ fullState2,
+ delta,
+ writeMajority,
+ probes(probe.ref),
+ nodes,
+ Set.empty,
+ testActor,
+ durable = false))
probe.expectMsgType[DeltaPropagation]
probe.lastSender ! WriteAck
@@ -237,14 +241,15 @@ class WriteAggregatorSpec extends AkkaSpec(s"""
val testProbes = probes()
val testProbeRefs = testProbes.map { case (a, tm) => a -> tm.writeAckAdapter }
val aggr = system.actorOf(
- WriteAggregatorSpec.writeAggregatorPropsWithDelta(fullState2,
- delta,
- writeAll,
- testProbeRefs,
- nodes,
- Set.empty,
- testActor,
- durable = false))
+ WriteAggregatorSpec.writeAggregatorPropsWithDelta(
+ fullState2,
+ delta,
+ writeAll,
+ testProbeRefs,
+ nodes,
+ Set.empty,
+ testActor,
+ durable = false))
testProbes(nodeA).expectMsgType[DeltaPropagation]
// no reply
@@ -271,14 +276,15 @@ class WriteAggregatorSpec extends AkkaSpec(s"""
"timeout when less than required acks" in {
val probe = TestProbe()
val aggr = system.actorOf(
- WriteAggregatorSpec.writeAggregatorPropsWithDelta(fullState2,
- delta,
- writeAll,
- probes(probe.ref),
- nodes,
- Set.empty,
- testActor,
- durable = false))
+ WriteAggregatorSpec.writeAggregatorPropsWithDelta(
+ fullState2,
+ delta,
+ writeAll,
+ probes(probe.ref),
+ nodes,
+ Set.empty,
+ testActor,
+ durable = false))
probe.expectMsgType[DeltaPropagation]
// no reply
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
index acb8773e8c..c2deecbac0 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
@@ -28,8 +28,9 @@ import akka.testkit.TestActors
class ReplicatedDataSerializerSpec
extends TestKit(
- ActorSystem("ReplicatedDataSerializerSpec",
- ConfigFactory.parseString("""
+ ActorSystem(
+ "ReplicatedDataSerializerSpec",
+ ConfigFactory.parseString("""
akka.loglevel = DEBUG
akka.actor.provider=cluster
akka.remote.netty.tcp.port=0
@@ -202,10 +203,12 @@ class ReplicatedDataSerializerSpec
checkSerialization(GCounter().increment(address1, 3))
checkSerialization(GCounter().increment(address1, 2).increment(address2, 5))
- checkSameContent(GCounter().increment(address1, 2).increment(address2, 5),
- GCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1))
- checkSameContent(GCounter().increment(address1, 2).increment(address3, 5),
- GCounter().increment(address3, 5).increment(address1, 2))
+ checkSameContent(
+ GCounter().increment(address1, 2).increment(address2, 5),
+ GCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1))
+ checkSameContent(
+ GCounter().increment(address1, 2).increment(address3, 5),
+ GCounter().increment(address3, 5).increment(address1, 2))
}
"serialize PNCounter" in {
@@ -215,12 +218,15 @@ class ReplicatedDataSerializerSpec
checkSerialization(PNCounter().increment(address1, 2).increment(address2, 5))
checkSerialization(PNCounter().increment(address1, 2).increment(address2, 5).decrement(address1, 1))
- checkSameContent(PNCounter().increment(address1, 2).increment(address2, 5),
- PNCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1))
- checkSameContent(PNCounter().increment(address1, 2).increment(address3, 5),
- PNCounter().increment(address3, 5).increment(address1, 2))
- checkSameContent(PNCounter().increment(address1, 2).decrement(address1, 1).increment(address3, 5),
- PNCounter().increment(address3, 5).increment(address1, 2).decrement(address1, 1))
+ checkSameContent(
+ PNCounter().increment(address1, 2).increment(address2, 5),
+ PNCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1))
+ checkSameContent(
+ PNCounter().increment(address1, 2).increment(address3, 5),
+ PNCounter().increment(address3, 5).increment(address1, 2))
+ checkSameContent(
+ PNCounter().increment(address1, 2).decrement(address1, 1).increment(address3, 5),
+ PNCounter().increment(address3, 5).increment(address1, 2).decrement(address1, 1))
}
"serialize ORMap" in {
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
index ffc12b7889..5e5e525253 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
@@ -31,8 +31,9 @@ import akka.cluster.ddata.ORMultiMap
class ReplicatorMessageSerializerSpec
extends TestKit(
- ActorSystem("ReplicatorMessageSerializerSpec",
- ConfigFactory.parseString("""
+ ActorSystem(
+ "ReplicatorMessageSerializerSpec",
+ ConfigFactory.parseString("""
akka.actor.provider=cluster
akka.remote.netty.tcp.port=0
akka.remote.artery.canonical.port = 0
@@ -87,9 +88,11 @@ class ReplicatorMessageSerializerSpec
checkSerialization(Changed(keyA)(data1))
checkSerialization(DataEnvelope(data1))
checkSerialization(
- DataEnvelope(data1,
- pruning = Map(address1 -> PruningPerformed(System.currentTimeMillis()),
- address3 -> PruningInitialized(address2, Set(address1.address)))))
+ DataEnvelope(
+ data1,
+ pruning = Map(
+ address1 -> PruningPerformed(System.currentTimeMillis()),
+ address3 -> PruningInitialized(address2, Set(address1.address)))))
checkSerialization(Write("A", DataEnvelope(data1)))
checkSerialization(WriteAck)
checkSerialization(WriteNack)
@@ -102,16 +105,19 @@ class ReplicatorMessageSerializerSpec
checkSerialization(
Gossip(Map("A" -> DataEnvelope(data1), "B" -> DataEnvelope(GSet() + "b" + "c")), sendBack = true))
checkSerialization(
- DeltaPropagation(address1,
- reply = true,
- Map("A" -> Delta(DataEnvelope(delta1), 1L, 1L),
- "B" -> Delta(DataEnvelope(delta2), 3L, 5L),
- "C" -> Delta(DataEnvelope(delta3), 1L, 1L),
- "DC" -> Delta(DataEnvelope(delta4), 1L, 1L))))
+ DeltaPropagation(
+ address1,
+ reply = true,
+ Map(
+ "A" -> Delta(DataEnvelope(delta1), 1L, 1L),
+ "B" -> Delta(DataEnvelope(delta2), 3L, 5L),
+ "C" -> Delta(DataEnvelope(delta3), 1L, 1L),
+ "DC" -> Delta(DataEnvelope(delta4), 1L, 1L))))
checkSerialization(new DurableDataEnvelope(data1))
- val pruning = Map(address1 -> PruningPerformed(System.currentTimeMillis()),
- address3 -> PruningInitialized(address2, Set(address1.address)))
+ val pruning = Map(
+ address1 -> PruningPerformed(System.currentTimeMillis()),
+ address3 -> PruningInitialized(address2, Set(address1.address)))
val deserializedDurableDataEnvelope =
checkSerialization(
new DurableDataEnvelope(DataEnvelope(data1, pruning, deltaVersions = VersionVector(address1, 13L))))
diff --git a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala
index 966638c920..48dd2f0463 100644
--- a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala
@@ -110,8 +110,9 @@ class FaultHandlingDocSpec(_system: ActorSystem)
def this() =
this(
- ActorSystem("FaultHandlingDocSpec",
- ConfigFactory.parseString("""
+ ActorSystem(
+ "FaultHandlingDocSpec",
+ ConfigFactory.parseString("""
akka {
loggers = ["akka.testkit.TestEventListener"]
loglevel = "WARNING"
diff --git a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala
index 3975f6044d..a6d4b9a9b7 100644
--- a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala
+++ b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala
@@ -74,11 +74,13 @@ abstract class FactorialFrontend2 extends Actor {
import akka.cluster.metrics.HeapMetricsSelector
val backend = context.actorOf(
- ClusterRouterGroup(AdaptiveLoadBalancingGroup(HeapMetricsSelector),
- ClusterRouterGroupSettings(totalInstances = 100,
- routeesPaths = List("/user/factorialBackend"),
- allowLocalRoutees = true,
- useRoles = Set("backend"))).props(),
+ ClusterRouterGroup(
+ AdaptiveLoadBalancingGroup(HeapMetricsSelector),
+ ClusterRouterGroupSettings(
+ totalInstances = 100,
+ routeesPaths = List("/user/factorialBackend"),
+ allowLocalRoutees = true,
+ useRoles = Set("backend"))).props(),
name = "factorialBackendRouter2")
//#router-lookup-in-code
@@ -93,11 +95,13 @@ abstract class FactorialFrontend3 extends Actor {
import akka.cluster.metrics.SystemLoadAverageMetricsSelector
val backend = context.actorOf(
- ClusterRouterPool(AdaptiveLoadBalancingPool(SystemLoadAverageMetricsSelector),
- ClusterRouterPoolSettings(totalInstances = 100,
- maxInstancesPerNode = 3,
- allowLocalRoutees = false,
- useRoles = Set("backend"))).props(Props[FactorialBackend]),
+ ClusterRouterPool(
+ AdaptiveLoadBalancingPool(SystemLoadAverageMetricsSelector),
+ ClusterRouterPoolSettings(
+ totalInstances = 100,
+ maxInstancesPerNode = 3,
+ allowLocalRoutees = false,
+ useRoles = Set("backend"))).props(Props[FactorialBackend]),
name = "factorialBackendRouter3")
//#router-deploy-in-code
}
diff --git a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala
index 1bdcf36aa8..24499eadbb 100644
--- a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala
+++ b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala
@@ -23,9 +23,10 @@ abstract class ClusterSingletonSupervision extends Actor {
import akka.actor.{ PoisonPill, Props }
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
context.system.actorOf(
- ClusterSingletonManager.props(singletonProps = Props(classOf[SupervisorActor], props, supervisorStrategy),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(context.system)),
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[SupervisorActor], props, supervisorStrategy),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(context.system)),
name = name)
//#singleton-supervisor-actor-usage
}
diff --git a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala
index 49096958f2..bf3c1f2025 100644
--- a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala
@@ -19,11 +19,12 @@ class BackoffSupervisorDocSpec {
val childProps = Props(classOf[EchoActor])
val supervisor = BackoffSupervisor.props(
- BackoffOpts.onStop(childProps,
- childName = "myEcho",
- minBackoff = 3.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+ BackoffOpts.onStop(
+ childProps,
+ childName = "myEcho",
+ minBackoff = 3.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
))
system.actorOf(supervisor, name = "echoSupervisor")
@@ -38,11 +39,12 @@ class BackoffSupervisorDocSpec {
val childProps = Props(classOf[EchoActor])
val supervisor = BackoffSupervisor.props(
- BackoffOpts.onFailure(childProps,
- childName = "myEcho",
- minBackoff = 3.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+ BackoffOpts.onFailure(
+ childProps,
+ childName = "myEcho",
+ minBackoff = 3.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
))
system.actorOf(supervisor, name = "echoSupervisor")
@@ -58,11 +60,12 @@ class BackoffSupervisorDocSpec {
//#backoff-custom-stop
val supervisor = BackoffSupervisor.props(
BackoffOpts
- .onStop(childProps,
- childName = "myEcho",
- minBackoff = 3.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+ .onStop(
+ childProps,
+ childName = "myEcho",
+ minBackoff = 3.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
)
.withManualReset // the child must send BackoffSupervisor.Reset to its parent
.withDefaultStoppingStrategy // Stop at any Exception thrown
@@ -81,11 +84,12 @@ class BackoffSupervisorDocSpec {
//#backoff-custom-fail
val supervisor = BackoffSupervisor.props(
BackoffOpts
- .onFailure(childProps,
- childName = "myEcho",
- minBackoff = 3.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+ .onFailure(
+ childProps,
+ childName = "myEcho",
+ minBackoff = 3.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
)
.withAutoReset(10.seconds) // reset if the child does not throw any errors within 10 seconds
.withSupervisorStrategy(OneForOneStrategy() {
diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala
index 819ce0a156..6f81d3b1cb 100644
--- a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala
@@ -74,18 +74,20 @@ object PersistenceMultiDocSpec {
override def journalPluginConfig =
ConfigFactory
.empty()
- .withValue(s"journal-plugin-$runtimeDistinction",
- context.system.settings.config
- .getValue("journal-plugin") // or a very different configuration coming from an external service.
+ .withValue(
+ s"journal-plugin-$runtimeDistinction",
+ context.system.settings.config
+ .getValue("journal-plugin") // or a very different configuration coming from an external service.
)
// Configuration which contains the snapshot store plugin id defined above
override def snapshotPluginConfig =
ConfigFactory
.empty()
- .withValue(s"snapshot-plugin-$runtimeDistinction",
- context.system.settings.config
- .getValue("snapshot-store-plugin") // or a very different configuration coming from an external service.
+ .withValue(
+ s"snapshot-plugin-$runtimeDistinction",
+ context.system.settings.config
+ .getValue("snapshot-store-plugin") // or a very different configuration coming from an external service.
)
}
diff --git a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala
index 794ae83b2b..f588cc06c5 100644
--- a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala
@@ -93,10 +93,11 @@ class PersistencePluginDocSpec extends WordSpec {
//#snapshot-store-plugin-config
"""
- val system = ActorSystem("PersistencePluginDocSpec",
- ConfigFactory
- .parseString(providerConfig)
- .withFallback(ConfigFactory.parseString(PersistencePluginDocSpec.config)))
+ val system = ActorSystem(
+ "PersistencePluginDocSpec",
+ ConfigFactory
+ .parseString(providerConfig)
+ .withFallback(ConfigFactory.parseString(PersistencePluginDocSpec.config)))
try {
Persistence(system)
} finally {
@@ -227,8 +228,9 @@ object PersistenceTCKDoc {
override def supportsRejectingNonSerializableObjects: CapabilityFlag =
true // or CapabilityFlag.on
- val storageLocations = List(new File(system.settings.config.getString("akka.persistence.journal.leveldb.dir")),
- new File(config.getString("akka.persistence.snapshot-store.local.dir")))
+ val storageLocations = List(
+ new File(system.settings.config.getString("akka.persistence.journal.leveldb.dir")),
+ new File(config.getString("akka.persistence.snapshot-store.local.dir")))
override def beforeAll(): Unit = {
super.beforeAll()
diff --git a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala
index 2a63b8de9a..68cb1dd369 100644
--- a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala
@@ -73,9 +73,10 @@ object PersistenceQueryDocSpec {
throw new IllegalArgumentException("LevelDB does not support " + offset.getClass.getName + " offsets")
}
- override def eventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long): Source[EventEnvelope, NotUsed] = {
+ override def eventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long): Source[EventEnvelope, NotUsed] = {
// implement in a similar way as eventsByTag
???
}
@@ -111,9 +112,10 @@ object PersistenceQueryDocSpec {
override def eventsByTag(tag: String, offset: Offset = Sequence(0L)): javadsl.Source[EventEnvelope, NotUsed] =
scaladslReadJournal.eventsByTag(tag, offset).asJava
- override def eventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long = 0L,
- toSequenceNr: Long = Long.MaxValue): javadsl.Source[EventEnvelope, NotUsed] =
+ override def eventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long = 0L,
+ toSequenceNr: Long = Long.MaxValue): javadsl.Source[EventEnvelope, NotUsed] =
scaladslReadJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
override def persistenceIds(): javadsl.Source[String, NotUsed] =
diff --git a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala
index 0bcbfa0e10..1781f39170 100644
--- a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala
@@ -400,8 +400,9 @@ router-dispatcher {}
for (i <- 1 to 100) router10b ! i
val threads10b = Thread.getAllStackTraces.keySet.asScala.filter { _.getName contains "router10b" }
val threads10bNr = threads10b.size
- require(threads10bNr == 5,
- s"Expected 5 threads for router10b, had $threads10bNr! Got: ${threads10b.map(_.getName)}")
+ require(
+ threads10bNr == 5,
+ s"Expected 5 threads for router10b, had $threads10bNr! Got: ${threads10b.map(_.getName)}")
//#smallest-mailbox-pool-1
val router11: ActorRef =
@@ -537,10 +538,10 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender {
"demonstrate dispatcher" in {
//#dispatchers
val router: ActorRef = system.actorOf(
- // “head” router actor will run on "router-dispatcher" dispatcher
- // Worker routees will run on "pool-dispatcher" dispatcher
- RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]),
- name = "poolWithDispatcher")
+ // “head” router actor will run on "router-dispatcher" dispatcher
+ // Worker routees will run on "pool-dispatcher" dispatcher
+ RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]),
+ name = "poolWithDispatcher")
//#dispatchers
}
@@ -595,8 +596,9 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender {
//#remoteRoutees
import akka.actor.{ Address, AddressFromURIString }
import akka.remote.routing.RemoteRouterConfig
- val addresses = Seq(Address("akka.tcp", "remotesys", "otherhost", 1234),
- AddressFromURIString("akka.tcp://othersys@anotherhost:1234"))
+ val addresses = Seq(
+ Address("akka.tcp", "remotesys", "otherhost", 1234),
+ AddressFromURIString("akka.tcp://othersys@anotherhost:1234"))
val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo]))
//#remoteRoutees
}
diff --git a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala
index fab5229d40..d873b75e74 100644
--- a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala
@@ -117,8 +117,9 @@ class GraphDSLDocSpec extends AkkaSpec {
//#graph-dsl-components-create
object PriorityWorkerPool {
- def apply[In, Out](worker: Flow[In, Out, Any],
- workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = {
+ def apply[In, Out](
+ worker: Flow[In, Out, Any],
+ workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = {
GraphDSL.create() { implicit b =>
import GraphDSL.Implicits._
@@ -138,9 +139,10 @@ class GraphDSLDocSpec extends AkkaSpec {
// We now expose the input ports of the priorityMerge and the output
// of the resultsMerge as our PriorityWorkerPool ports
// -- all neatly wrapped in our domain specific Shape
- PriorityWorkerPoolShape(jobsIn = priorityMerge.in(0),
- priorityJobsIn = priorityMerge.preferred,
- resultsOut = resultsMerge.out)
+ PriorityWorkerPoolShape(
+ jobsIn = priorityMerge.in(0),
+ priorityJobsIn = priorityMerge.preferred,
+ resultsOut = resultsMerge.out)
}
}
diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala
index 07ff90e6f8..80b9e722a6 100644
--- a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala
@@ -416,21 +416,20 @@ class GraphStageDocSpec extends AkkaSpec {
val promise = Promise[A]()
val logic = new GraphStageLogic(shape) {
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = {
- val elem = grab(in)
- promise.success(elem)
- push(out, elem)
+ setHandler(in, new InHandler {
+ override def onPush(): Unit = {
+ val elem = grab(in)
+ promise.success(elem)
+ push(out, elem)
- // replace handler with one that only forwards elements
- setHandler(in, new InHandler {
- override def onPush(): Unit = {
- push(out, grab(in))
- }
- })
- }
- })
+ // replace handler with one that only forwards elements
+ setHandler(in, new InHandler {
+ override def onPush(): Unit = {
+ push(out, grab(in))
+ }
+ })
+ }
+ })
setHandler(out, new OutHandler {
override def onPull(): Unit = {
@@ -477,44 +476,46 @@ class GraphStageDocSpec extends AkkaSpec {
pull(in)
}
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = {
- val elem = grab(in)
- buffer.enqueue(elem)
- if (downstreamWaiting) {
- downstreamWaiting = false
- val bufferedElem = buffer.dequeue()
- push(out, bufferedElem)
- }
- if (!bufferFull) {
- pull(in)
- }
- }
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush(): Unit = {
+ val elem = grab(in)
+ buffer.enqueue(elem)
+ if (downstreamWaiting) {
+ downstreamWaiting = false
+ val bufferedElem = buffer.dequeue()
+ push(out, bufferedElem)
+ }
+ if (!bufferFull) {
+ pull(in)
+ }
+ }
- override def onUpstreamFinish(): Unit = {
- if (buffer.nonEmpty) {
- // emit the rest if possible
- emitMultiple(out, buffer.toIterator)
- }
- completeStage()
- }
- })
+ override def onUpstreamFinish(): Unit = {
+ if (buffer.nonEmpty) {
+ // emit the rest if possible
+ emitMultiple(out, buffer.toIterator)
+ }
+ completeStage()
+ }
+ })
- setHandler(out,
- new OutHandler {
- override def onPull(): Unit = {
- if (buffer.isEmpty) {
- downstreamWaiting = true
- } else {
- val elem = buffer.dequeue
- push(out, elem)
- }
- if (!bufferFull && !hasBeenPulled(in)) {
- pull(in)
- }
- }
- })
+ setHandler(
+ out,
+ new OutHandler {
+ override def onPull(): Unit = {
+ if (buffer.isEmpty) {
+ downstreamWaiting = true
+ } else {
+ val elem = buffer.dequeue
+ push(out, elem)
+ }
+ if (!bufferFull && !hasBeenPulled(in)) {
+ pull(in)
+ }
+ }
+ })
}
}
diff --git a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala
index 5a9696629e..51f86cb06f 100644
--- a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala
@@ -113,9 +113,10 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec {
// value to the left is used)
val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
producer.toMat(
- PartitionHub.sink((size, elem) => math.abs(elem.hashCode % size),
- startAfterNrOfConsumers = 2,
- bufferSize = 256))(Keep.right)
+ PartitionHub.sink(
+ (size, elem) => math.abs(elem.hashCode % size),
+ startAfterNrOfConsumers = 2,
+ bufferSize = 256))(Keep.right)
// By running/materializing the producer, we get back a Source, which
// gives us access to the elements published by the producer.
@@ -169,9 +170,10 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec {
// Note that this is a moving target since the elements are consumed concurrently.
val runnableGraph: RunnableGraph[Source[Int, NotUsed]] =
producer.toMat(
- PartitionHub.statefulSink(() => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)),
- startAfterNrOfConsumers = 2,
- bufferSize = 16))(Keep.right)
+ PartitionHub.statefulSink(
+ () => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)),
+ startAfterNrOfConsumers = 2,
+ bufferSize = 16))(Keep.right)
val fromProducer: Source[Int, NotUsed] = runnableGraph.run()
diff --git a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala
index 95d2fa0f22..7c38e8f156 100644
--- a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala
@@ -206,11 +206,12 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
val probe = TestProbe()
val receiver = system.actorOf(Props(new AckingReceiver(probe.ref, ackWith = AckMessage)))
- val sink = Sink.actorRefWithAck(receiver,
- onInitMessage = InitMessage,
- ackMessage = AckMessage,
- onCompleteMessage = OnCompleteMessage,
- onFailureMessage = onErrorMessage)
+ val sink = Sink.actorRefWithAck(
+ receiver,
+ onInitMessage = InitMessage,
+ ackMessage = AckMessage,
+ onCompleteMessage = OnCompleteMessage,
+ onFailureMessage = onErrorMessage)
words.map(_.toLowerCase).runWith(sink)
@@ -295,13 +296,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
//#external-service-mapAsyncUnordered
probe.receiveN(7).toSet should be(
- Set("rolandkuhn@somewhere.com",
- "patriknw@somewhere.com",
- "bantonsson@somewhere.com",
- "drewhk@somewhere.com",
- "ktosopl@somewhere.com",
- "mmartynas@somewhere.com",
- "akkateam@somewhere.com"))
+ Set(
+ "rolandkuhn@somewhere.com",
+ "patriknw@somewhere.com",
+ "bantonsson@somewhere.com",
+ "drewhk@somewhere.com",
+ "ktosopl@somewhere.com",
+ "mmartynas@somewhere.com",
+ "akkateam@somewhere.com"))
}
"careful managed blocking with mapAsync" in {
@@ -332,13 +334,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
//#blocking-mapAsync
probe.receiveN(7).toSet should be(
- Set("rolandkuhn".hashCode.toString,
- "patriknw".hashCode.toString,
- "bantonsson".hashCode.toString,
- "drewhk".hashCode.toString,
- "ktosopl".hashCode.toString,
- "mmartynas".hashCode.toString,
- "akkateam".hashCode.toString))
+ Set(
+ "rolandkuhn".hashCode.toString,
+ "patriknw".hashCode.toString,
+ "bantonsson".hashCode.toString,
+ "drewhk".hashCode.toString,
+ "ktosopl".hashCode.toString,
+ "mmartynas".hashCode.toString,
+ "akkateam".hashCode.toString))
}
"careful managed blocking with map" in {
@@ -452,16 +455,17 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) {
//#sometimes-slow-mapAsyncUnordered
probe.receiveN(10).toSet should be(
- Set("after: A",
- "after: B",
- "after: C",
- "after: D",
- "after: E",
- "after: F",
- "after: G",
- "after: H",
- "after: I",
- "after: J"))
+ Set(
+ "after: A",
+ "after: B",
+ "after: C",
+ "after: D",
+ "after: E",
+ "after: F",
+ "after: G",
+ "after: H",
+ "after: I",
+ "after: J"))
}
"illustrate use of source queue" in {
diff --git a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala
index 6974b20478..ec2e1d1724 100644
--- a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala
@@ -35,10 +35,11 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec {
"demonstrate a restart with backoff source" in compileOnlySpec {
//#restart-with-backoff-source
- val restartSource = RestartSource.withBackoff(minBackoff = 3.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
- maxRestarts = 20 // limits the amount of restarts to 20
+ val restartSource = RestartSource.withBackoff(
+ minBackoff = 3.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
+ maxRestarts = 20 // limits the amount of restarts to 20
) { () =>
// Create a source from a future of a source
Source.fromFutureSource {
diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala
index c214349fe1..3453cda44f 100644
--- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala
+++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala
@@ -35,27 +35,28 @@ class RecipeByteStrings extends RecipeSpec {
emitChunk()
}
})
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = {
- val elem = grab(in)
- buffer ++= elem
- emitChunk()
- }
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush(): Unit = {
+ val elem = grab(in)
+ buffer ++= elem
+ emitChunk()
+ }
- override def onUpstreamFinish(): Unit = {
- if (buffer.isEmpty) completeStage()
- else {
- // There are elements left in buffer, so
- // we keep accepting downstream pulls and push from buffer until emptied.
- //
- // It might be though, that the upstream finished while it was pulled, in which
- // case we will not get an onPull from the downstream, because we already had one.
- // In that case we need to emit from the buffer.
- if (isAvailable(out)) emitChunk()
- }
- }
- })
+ override def onUpstreamFinish(): Unit = {
+ if (buffer.isEmpty) completeStage()
+ else {
+ // There are elements left in buffer, so
+ // we keep accepting downstream pulls and push from buffer until emptied.
+ //
+ // It might be though, that the upstream finished while it was pulled, in which
+ // case we will not get an onPull from the downstream, because we already had one.
+ // In that case we need to emit from the buffer.
+ if (isAvailable(out)) emitChunk()
+ }
+ }
+ })
private def emitChunk(): Unit = {
if (buffer.isEmpty) {
@@ -93,21 +94,19 @@ class RecipeByteStrings extends RecipeSpec {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
private var count = 0
- setHandlers(in,
- out,
- new InHandler with OutHandler {
+ setHandlers(in, out, new InHandler with OutHandler {
- override def onPull(): Unit = {
- pull(in)
- }
+ override def onPull(): Unit = {
+ pull(in)
+ }
- override def onPush(): Unit = {
- val chunk = grab(in)
- count += chunk.size
- if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes"))
- else push(out, chunk)
- }
- })
+ override def onPush(): Unit = {
+ val chunk = grab(in)
+ count += chunk.size
+ if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes"))
+ else push(out, chunk)
+ }
+ })
}
}
diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala
index 589757b30f..2569f6657a 100644
--- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala
+++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala
@@ -37,10 +37,11 @@ class RecipeGlobalRateLimit extends RecipeSpec {
private var waitQueue = immutable.Queue.empty[ActorRef]
private var permitTokens = maxAvailableTokens
- private val replenishTimer = system.scheduler.schedule(initialDelay = tokenRefreshPeriod,
- interval = tokenRefreshPeriod,
- receiver = self,
- ReplenishTokens)
+ private val replenishTimer = system.scheduler.schedule(
+ initialDelay = tokenRefreshPeriod,
+ interval = tokenRefreshPeriod,
+ receiver = self,
+ ReplenishTokens)
override def receive: Receive = open
diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala
index d1ca4482ad..c7679c12a7 100644
--- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala
+++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala
@@ -55,22 +55,23 @@ object HoldOps {
private var currentValue: T = _
private var waitingFirstValue = true
- setHandlers(in,
- out,
- new InHandler with OutHandler {
- override def onPush(): Unit = {
- currentValue = grab(in)
- if (waitingFirstValue) {
- waitingFirstValue = false
- if (isAvailable(out)) push(out, currentValue)
- }
- pull(in)
- }
+ setHandlers(
+ in,
+ out,
+ new InHandler with OutHandler {
+ override def onPush(): Unit = {
+ currentValue = grab(in)
+ if (waitingFirstValue) {
+ waitingFirstValue = false
+ if (isAvailable(out)) push(out, currentValue)
+ }
+ pull(in)
+ }
- override def onPull(): Unit = {
- if (!waitingFirstValue) push(out, currentValue)
- }
- })
+ override def onPull(): Unit = {
+ if (!waitingFirstValue) push(out, currentValue)
+ }
+ })
override def preStart(): Unit = {
pull(in)
diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala
index dfffd4245e..f02b55faaa 100644
--- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala
+++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala
@@ -17,11 +17,12 @@ class RecipeParseLines extends RecipeSpec {
"work" in {
val rawData = Source(
- List(ByteString("Hello World"),
- ByteString("\r"),
- ByteString("!\r"),
- ByteString("\nHello Akka!\r\nHello Streams!"),
- ByteString("\r\n\r\n")))
+ List(
+ ByteString("Hello World"),
+ ByteString("\r"),
+ ByteString("!\r"),
+ ByteString("\nHello Akka!\r\nHello Streams!"),
+ ByteString("\r\n\r\n")))
//#parse-lines
import akka.stream.scaladsl.Framing
diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala
index 6641710428..c5651dbed8 100644
--- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala
+++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala
@@ -40,9 +40,8 @@ class RecipeReduceByKey extends RecipeSpec {
def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!"))
//#reduce-by-key-general
- def reduceByKey[In, K, Out](maximumGroupSize: Int,
- groupKey: (In) => K,
- map: (In) => Out)(reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {
+ def reduceByKey[In, K, Out](maximumGroupSize: Int, groupKey: (In) => K, map: (In) => Out)(
+ reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {
Flow[In]
.groupBy[K](maximumGroupSize, groupKey)
diff --git a/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala b/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala
index 65de4ee779..9ca2caaad8 100644
--- a/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala
+++ b/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala
@@ -20,9 +20,10 @@ object SourceOrFlow {
//#log
.log(name = "myStream")
.addAttributes(
- Attributes.logLevels(onElement = Attributes.LogLevels.Off,
- onFailure = Attributes.LogLevels.Error,
- onFinish = Attributes.LogLevels.Info))
+ Attributes.logLevels(
+ onElement = Attributes.LogLevels.Off,
+ onFailure = Attributes.LogLevels.Error,
+ onFinish = Attributes.LogLevels.Info))
//#log
}
diff --git a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala
index 9f1cf594f3..d86bc2b373 100644
--- a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala
@@ -322,8 +322,9 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
import akka.testkit.EventFilter
import com.typesafe.config.ConfigFactory
- implicit val system = ActorSystem("testsystem",
- ConfigFactory.parseString("""
+ implicit val system = ActorSystem(
+ "testsystem",
+ ConfigFactory.parseString("""
akka.loggers = ["akka.testkit.TestEventListener"]
"""))
try {
diff --git a/akka-docs/src/test/scala/tutorial_4/Device.scala b/akka-docs/src/test/scala/tutorial_4/Device.scala
index c4c5537583..aa683699f8 100644
--- a/akka-docs/src/test/scala/tutorial_4/Device.scala
+++ b/akka-docs/src/test/scala/tutorial_4/Device.scala
@@ -31,11 +31,12 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging
sender() ! DeviceManager.DeviceRegistered
case DeviceManager.RequestTrackDevice(groupId, deviceId) =>
- log.warning("Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.",
- groupId,
- deviceId,
- this.groupId,
- this.deviceId)
+ log.warning(
+ "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.",
+ groupId,
+ deviceId,
+ this.groupId,
+ this.deviceId)
case RecordTemperature(id, value) =>
log.info("Recorded temperature reading {} with {}", value, id)
diff --git a/akka-docs/src/test/scala/tutorial_5/Device.scala b/akka-docs/src/test/scala/tutorial_5/Device.scala
index 0b1e41f4d4..b3f8489aa1 100644
--- a/akka-docs/src/test/scala/tutorial_5/Device.scala
+++ b/akka-docs/src/test/scala/tutorial_5/Device.scala
@@ -31,11 +31,12 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging
sender() ! DeviceManager.DeviceRegistered
case DeviceManager.RequestTrackDevice(groupId, deviceId) =>
- log.warning("Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.",
- groupId,
- deviceId,
- this.groupId,
- this.deviceId)
+ log.warning(
+ "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.",
+ groupId,
+ deviceId,
+ this.groupId,
+ this.deviceId)
case RecordTemperature(id, value) =>
log.info("Recorded temperature reading {} with {}", value, id)
diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala
index 9f3036c593..e7a2d2b697 100644
--- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala
+++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala
@@ -13,18 +13,20 @@ import scala.concurrent.duration._
object DeviceGroupQuery {
case object CollectionTimeout
- def props(actorToDeviceId: Map[ActorRef, String],
- requestId: Long,
- requester: ActorRef,
- timeout: FiniteDuration): Props = {
+ def props(
+ actorToDeviceId: Map[ActorRef, String],
+ requestId: Long,
+ requester: ActorRef,
+ timeout: FiniteDuration): Props = {
Props(new DeviceGroupQuery(actorToDeviceId, requestId, requester, timeout))
}
}
-class DeviceGroupQuery(actorToDeviceId: Map[ActorRef, String],
- requestId: Long,
- requester: ActorRef,
- timeout: FiniteDuration)
+class DeviceGroupQuery(
+ actorToDeviceId: Map[ActorRef, String],
+ requestId: Long,
+ requester: ActorRef,
+ timeout: FiniteDuration)
extends Actor
with ActorLogging {
import DeviceGroupQuery._
@@ -47,8 +49,9 @@ class DeviceGroupQuery(actorToDeviceId: Map[ActorRef, String],
override def receive: Receive =
waitingForReplies(Map.empty, actorToDeviceId.keySet)
- def waitingForReplies(repliesSoFar: Map[String, DeviceGroup.TemperatureReading],
- stillWaiting: Set[ActorRef]): Receive = {
+ def waitingForReplies(
+ repliesSoFar: Map[String, DeviceGroup.TemperatureReading],
+ stillWaiting: Set[ActorRef]): Receive = {
case Device.RespondTemperature(0, valueOption) =>
val deviceActor = sender()
val reading = valueOption match {
@@ -72,10 +75,11 @@ class DeviceGroupQuery(actorToDeviceId: Map[ActorRef, String],
//#query-state
//#query-collect-reply
- def receivedResponse(deviceActor: ActorRef,
- reading: DeviceGroup.TemperatureReading,
- stillWaiting: Set[ActorRef],
- repliesSoFar: Map[String, DeviceGroup.TemperatureReading]): Unit = {
+ def receivedResponse(
+ deviceActor: ActorRef,
+ reading: DeviceGroup.TemperatureReading,
+ stillWaiting: Set[ActorRef],
+ repliesSoFar: Map[String, DeviceGroup.TemperatureReading]): Unit = {
context.unwatch(deviceActor)
val deviceId = actorToDeviceId(deviceActor)
val newStillWaiting = stillWaiting - deviceActor
diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala
index d7ced2fc00..483825d179 100644
--- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala
+++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala
@@ -21,10 +21,11 @@ class DeviceGroupQuerySpec extends AkkaSpec {
val device2 = TestProbe()
val queryActor = system.actorOf(
- DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
- requestId = 1,
- requester = requester.ref,
- timeout = 3.seconds))
+ DeviceGroupQuery.props(
+ actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
+ requestId = 1,
+ requester = requester.ref,
+ timeout = 3.seconds))
device1.expectMsg(Device.ReadTemperature(requestId = 0))
device2.expectMsg(Device.ReadTemperature(requestId = 0))
@@ -33,9 +34,9 @@ class DeviceGroupQuerySpec extends AkkaSpec {
queryActor.tell(Device.RespondTemperature(requestId = 0, Some(2.0)), device2.ref)
requester.expectMsg(
- DeviceGroup.RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> DeviceGroup.Temperature(1.0),
- "device2" -> DeviceGroup.Temperature(2.0))))
+ DeviceGroup.RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), "device2" -> DeviceGroup.Temperature(2.0))))
}
//#query-test-normal
@@ -47,10 +48,11 @@ class DeviceGroupQuerySpec extends AkkaSpec {
val device2 = TestProbe()
val queryActor = system.actorOf(
- DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
- requestId = 1,
- requester = requester.ref,
- timeout = 3.seconds))
+ DeviceGroupQuery.props(
+ actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
+ requestId = 1,
+ requester = requester.ref,
+ timeout = 3.seconds))
device1.expectMsg(Device.ReadTemperature(requestId = 0))
device2.expectMsg(Device.ReadTemperature(requestId = 0))
@@ -59,9 +61,10 @@ class DeviceGroupQuerySpec extends AkkaSpec {
queryActor.tell(Device.RespondTemperature(requestId = 0, Some(2.0)), device2.ref)
requester.expectMsg(
- DeviceGroup.RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> DeviceGroup.TemperatureNotAvailable,
- "device2" -> DeviceGroup.Temperature(2.0))))
+ DeviceGroup.RespondAllTemperatures(
+ requestId = 1,
+ temperatures =
+ Map("device1" -> DeviceGroup.TemperatureNotAvailable, "device2" -> DeviceGroup.Temperature(2.0))))
}
//#query-test-no-reading
@@ -73,10 +76,11 @@ class DeviceGroupQuerySpec extends AkkaSpec {
val device2 = TestProbe()
val queryActor = system.actorOf(
- DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
- requestId = 1,
- requester = requester.ref,
- timeout = 3.seconds))
+ DeviceGroupQuery.props(
+ actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
+ requestId = 1,
+ requester = requester.ref,
+ timeout = 3.seconds))
device1.expectMsg(Device.ReadTemperature(requestId = 0))
device2.expectMsg(Device.ReadTemperature(requestId = 0))
@@ -85,9 +89,9 @@ class DeviceGroupQuerySpec extends AkkaSpec {
device2.ref ! PoisonPill
requester.expectMsg(
- DeviceGroup.RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> DeviceGroup.Temperature(1.0),
- "device2" -> DeviceGroup.DeviceNotAvailable)))
+ DeviceGroup.RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), "device2" -> DeviceGroup.DeviceNotAvailable)))
}
//#query-test-stopped
@@ -99,10 +103,11 @@ class DeviceGroupQuerySpec extends AkkaSpec {
val device2 = TestProbe()
val queryActor = system.actorOf(
- DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
- requestId = 1,
- requester = requester.ref,
- timeout = 3.seconds))
+ DeviceGroupQuery.props(
+ actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
+ requestId = 1,
+ requester = requester.ref,
+ timeout = 3.seconds))
device1.expectMsg(Device.ReadTemperature(requestId = 0))
device2.expectMsg(Device.ReadTemperature(requestId = 0))
@@ -112,9 +117,9 @@ class DeviceGroupQuerySpec extends AkkaSpec {
device2.ref ! PoisonPill
requester.expectMsg(
- DeviceGroup.RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> DeviceGroup.Temperature(1.0),
- "device2" -> DeviceGroup.Temperature(2.0))))
+ DeviceGroup.RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), "device2" -> DeviceGroup.Temperature(2.0))))
}
//#query-test-stopped-later
@@ -126,10 +131,11 @@ class DeviceGroupQuerySpec extends AkkaSpec {
val device2 = TestProbe()
val queryActor = system.actorOf(
- DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
- requestId = 1,
- requester = requester.ref,
- timeout = 1.second))
+ DeviceGroupQuery.props(
+ actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"),
+ requestId = 1,
+ requester = requester.ref,
+ timeout = 1.second))
device1.expectMsg(Device.ReadTemperature(requestId = 0))
device2.expectMsg(Device.ReadTemperature(requestId = 0))
@@ -137,9 +143,9 @@ class DeviceGroupQuerySpec extends AkkaSpec {
queryActor.tell(Device.RespondTemperature(requestId = 0, Some(1.0)), device1.ref)
requester.expectMsg(
- DeviceGroup.RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> DeviceGroup.Temperature(1.0),
- "device2" -> DeviceGroup.DeviceTimedOut)))
+ DeviceGroup.RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), "device2" -> DeviceGroup.DeviceTimedOut)))
}
//#query-test-timeout
diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala
index fac49cfa60..0a58d0e7f2 100644
--- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala
+++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala
@@ -122,10 +122,12 @@ class DeviceGroupSpec extends AkkaSpec {
groupActor.tell(DeviceGroup.RequestAllTemperatures(requestId = 0), probe.ref)
probe.expectMsg(
- DeviceGroup.RespondAllTemperatures(requestId = 0,
- temperatures = Map("device1" -> DeviceGroup.Temperature(1.0),
- "device2" -> DeviceGroup.Temperature(2.0),
- "device3" -> DeviceGroup.TemperatureNotAvailable)))
+ DeviceGroup.RespondAllTemperatures(
+ requestId = 0,
+ temperatures = Map(
+ "device1" -> DeviceGroup.Temperature(1.0),
+ "device2" -> DeviceGroup.Temperature(2.0),
+ "device3" -> DeviceGroup.TemperatureNotAvailable)))
}
//#group-query-integration-test
diff --git a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala
index 127d562cdf..6aca175a89 100644
--- a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala
@@ -87,8 +87,9 @@ object SupervisingActor {
}
class SupervisingActor(context: ActorContext[String]) extends AbstractBehavior[String] {
- private val child = context.spawn(Behaviors.supervise(SupervisedActor()).onFailure(SupervisorStrategy.restart),
- name = "supervised-actor")
+ private val child = context.spawn(
+ Behaviors.supervise(SupervisedActor()).onFailure(SupervisorStrategy.restart),
+ name = "supervised-actor")
override def onMessage(msg: String): Behavior[String] =
msg match {
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala
index c6cfd3cdff..ae5f0267bf 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala
@@ -26,10 +26,11 @@ import typed.tutorial_5.DeviceManager.TemperatureReading
//#query-outline
object DeviceGroupQuery {
- def apply(deviceIdToActor: Map[String, ActorRef[Device.DeviceMessage]],
- requestId: Long,
- requester: ActorRef[RespondAllTemperatures],
- timeout: FiniteDuration): Behavior[DeviceGroupQueryMessage] = {
+ def apply(
+ deviceIdToActor: Map[String, ActorRef[Device.DeviceMessage]],
+ requestId: Long,
+ requester: ActorRef[RespondAllTemperatures],
+ timeout: FiniteDuration): Behavior[DeviceGroupQueryMessage] = {
Behaviors.setup { context =>
Behaviors.withTimers { timers =>
new DeviceGroupQuery(deviceIdToActor, requestId, requester, timeout, context, timers)
@@ -46,12 +47,13 @@ object DeviceGroupQuery {
private final case class DeviceTerminated(deviceId: String) extends DeviceGroupQueryMessage
}
-class DeviceGroupQuery(deviceIdToActor: Map[String, ActorRef[DeviceMessage]],
- requestId: Long,
- requester: ActorRef[RespondAllTemperatures],
- timeout: FiniteDuration,
- context: ActorContext[DeviceGroupQuery.DeviceGroupQueryMessage],
- timers: TimerScheduler[DeviceGroupQuery.DeviceGroupQueryMessage])
+class DeviceGroupQuery(
+ deviceIdToActor: Map[String, ActorRef[DeviceMessage]],
+ requestId: Long,
+ requester: ActorRef[RespondAllTemperatures],
+ timeout: FiniteDuration,
+ context: ActorContext[DeviceGroupQuery.DeviceGroupQueryMessage],
+ timers: TimerScheduler[DeviceGroupQuery.DeviceGroupQueryMessage])
extends AbstractBehavior[DeviceGroupQuery.DeviceGroupQueryMessage] {
import DeviceGroupQuery._
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala
index 6753c4e0ca..893fb73953 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala
@@ -39,8 +39,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike {
queryActor ! WrappedRespondTemperature(Device.RespondTemperature(requestId = 0, "device2", Some(2.0)))
requester.expectMessage(
- RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0))))
+ RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0))))
}
//#query-test-normal
@@ -63,8 +64,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike {
queryActor ! WrappedRespondTemperature(Device.RespondTemperature(requestId = 0, "device2", Some(2.0)))
requester.expectMessage(
- RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> TemperatureNotAvailable, "device2" -> Temperature(2.0))))
+ RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> TemperatureNotAvailable, "device2" -> Temperature(2.0))))
}
//#query-test-no-reading
@@ -88,8 +90,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike {
device2.stop()
requester.expectMessage(
- RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> Temperature(2.0), "device2" -> DeviceNotAvailable)))
+ RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> Temperature(2.0), "device2" -> DeviceNotAvailable)))
}
//#query-test-stopped
@@ -114,8 +117,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike {
device2.stop()
requester.expectMessage(
- RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0))))
+ RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0))))
}
//#query-test-stopped-later
@@ -139,8 +143,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike {
// no reply from device2
requester.expectMessage(
- RespondAllTemperatures(requestId = 1,
- temperatures = Map("device1" -> Temperature(1.0), "device2" -> DeviceTimedOut)))
+ RespondAllTemperatures(
+ requestId = 1,
+ temperatures = Map("device1" -> Temperature(1.0), "device2" -> DeviceTimedOut)))
}
//#query-test-timeout
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala
index 1696da1f69..8ae9eb9492 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala
@@ -131,10 +131,10 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with WordSpecLike {
val allTempProbe = createTestProbe[RespondAllTemperatures]()
groupActor ! RequestAllTemperatures(requestId = 0, groupId = "group", allTempProbe.ref)
allTempProbe.expectMessage(
- RespondAllTemperatures(requestId = 0,
- temperatures = Map("device1" -> Temperature(1.0),
- "device2" -> Temperature(2.0),
- "device3" -> TemperatureNotAvailable)))
+ RespondAllTemperatures(
+ requestId = 0,
+ temperatures =
+ Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0), "device3" -> TemperatureNotAvailable)))
}
//#group-query-integration-test
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
index af626e419e..05ac416faa 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -79,9 +79,10 @@ trait Conductor { this: TestConductorExt =>
* @param participants gives the number of participants which shall connect
* before any of their startClient() operations complete.
*/
- def startController(participants: Int,
- name: RoleName,
- controllerPort: InetSocketAddress): Future[InetSocketAddress] = {
+ def startController(
+ participants: Int,
+ name: RoleName,
+ controllerPort: InetSocketAddress): Future[InetSocketAddress] = {
if (_controller ne null) throw new RuntimeException("TestConductorServer was already started")
_controller = system.actorOf(Props(classOf[Controller], participants, controllerPort), "controller")
import Settings.BarrierTimeout
@@ -424,12 +425,11 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP
import BarrierCoordinator._
val settings = TestConductor().Settings
- val connection = RemoteConnection(Server,
- controllerPort,
- settings.ServerSocketWorkerPoolSize,
- new ConductorHandler(settings.QueryTimeout,
- self,
- Logging(context.system, classOf[ConductorHandler].getName)))
+ val connection = RemoteConnection(
+ Server,
+ controllerPort,
+ settings.ServerSocketWorkerPoolSize,
+ new ConductorHandler(settings.QueryTimeout, self, Logging(context.system, classOf[ConductorHandler].getName)))
/*
* Supervision of the BarrierCoordinator means to catch all his bad emotions
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala
index 7ba3ec89aa..4a1de5f4dc 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala
@@ -148,8 +148,9 @@ private[akka] class MsgDecoder extends OneToOneDecoder {
case BarrierOp.Failed => BarrierResult(barrier.getName, false)
case BarrierOp.Fail => FailBarrier(barrier.getName)
case BarrierOp.Enter =>
- EnterBarrier(barrier.getName,
- if (barrier.hasTimeout) Option(Duration.fromNanos(barrier.getTimeout)) else None)
+ EnterBarrier(
+ barrier.getName,
+ if (barrier.hasTimeout) Option(Duration.fromNanos(barrier.getTimeout)) else None)
}
} else if (w.hasFailure) {
val f = w.getFailure
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala
index 2487e2ed9c..e2919d971f 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala
@@ -65,9 +65,10 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C
val PacketSplitThreshold = config.getMillisDuration("packet-split-threshold")
private def computeWPS(config: Config): Int =
- ThreadPoolConfig.scaledPoolSize(config.getInt("pool-size-min"),
- config.getDouble("pool-size-factor"),
- config.getInt("pool-size-max"))
+ ThreadPoolConfig.scaledPoolSize(
+ config.getInt("pool-size-min"),
+ config.getDouble("pool-size-factor"),
+ config.getInt("pool-size-max"))
val ServerSocketWorkerPoolSize = computeWPS(config.getConfig("netty.server-socket-worker-pool"))
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
index 426bb57bd1..450fbcb908 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
@@ -172,13 +172,14 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress)
val settings = TestConductor().Settings
- val handler = new PlayerHandler(controllerAddr,
- settings.ClientReconnects,
- settings.ReconnectBackoff,
- settings.ClientSocketWorkerPoolSize,
- self,
- Logging(context.system, classOf[PlayerHandler].getName),
- context.system.scheduler)(context.dispatcher)
+ val handler = new PlayerHandler(
+ controllerAddr,
+ settings.ClientReconnects,
+ settings.ReconnectBackoff,
+ settings.ClientSocketWorkerPoolSize,
+ self,
+ Logging(context.system, classOf[PlayerHandler].getName),
+ context.system.scheduler)(context.dispatcher)
startWith(Connecting, Data(None, None))
@@ -257,10 +258,11 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress)
// Conversion needed as the TokenBucket measures in octets: 125000 Octets/s = 1Mbit/s
// FIXME: Initial capacity should be carefully chosen
else
- TokenBucket(capacity = 1000,
- tokensPerSecond = t.rateMBit * 125000.0,
- nanoTimeOfLastSend = 0,
- availableTokens = 0)
+ TokenBucket(
+ capacity = 1000,
+ tokensPerSecond = t.rateMBit * 125000.0,
+ nanoTimeOfLastSend = 0,
+ availableTokens = 0)
val cmdFuture = TestConductor().transport.managementCommand(SetThrottle(t.target, t.direction, mode))
@@ -308,13 +310,14 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress)
*
* INTERNAL API.
*/
-private[akka] class PlayerHandler(server: InetSocketAddress,
- private var reconnects: Int,
- backoff: FiniteDuration,
- poolSize: Int,
- fsm: ActorRef,
- log: LoggingAdapter,
- scheduler: Scheduler)(implicit executor: ExecutionContext)
+private[akka] class PlayerHandler(
+ server: InetSocketAddress,
+ private var reconnects: Int,
+ backoff: FiniteDuration,
+ poolSize: Int,
+ fsm: ActorRef,
+ log: LoggingAdapter,
+ scheduler: Scheduler)(implicit executor: ExecutionContext)
extends SimpleChannelUpstreamHandler {
import ClientFSM._
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
index ac7d5aeffc..cbd497309f 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
@@ -210,11 +210,12 @@ object MultiNodeSpec {
require(selfIndex >= 0 && selfIndex < maxNodes, "multinode.index is out of bounds: " + selfIndex)
private[testkit] val nodeConfig = mapToConfig(
- Map("akka.actor.provider" -> "remote",
- "akka.remote.artery.canonical.hostname" -> selfName,
- "akka.remote.netty.tcp.hostname" -> selfName,
- "akka.remote.netty.tcp.port" -> selfPort,
- "akka.remote.artery.canonical.port" -> selfPort))
+ Map(
+ "akka.actor.provider" -> "remote",
+ "akka.remote.artery.canonical.hostname" -> selfName,
+ "akka.remote.netty.tcp.hostname" -> selfName,
+ "akka.remote.netty.tcp.port" -> selfPort,
+ "akka.remote.artery.canonical.port" -> selfPort))
private[testkit] val baseConfig: Config =
ConfigFactory.parseString("""
@@ -261,10 +262,11 @@ object MultiNodeSpec {
* `AskTimeoutException: sending to terminated ref breaks promises`. Using lazy
* val is fine.
*/
-abstract class MultiNodeSpec(val myself: RoleName,
- _system: ActorSystem,
- _roles: immutable.Seq[RoleName],
- deployments: RoleName => Seq[String])
+abstract class MultiNodeSpec(
+ val myself: RoleName,
+ _system: ActorSystem,
+ _roles: immutable.Seq[RoleName],
+ deployments: RoleName => Seq[String])
extends TestKit(_system)
with MultiNodeSpecCallbacks {
@@ -359,8 +361,9 @@ abstract class MultiNodeSpec(val myself: RoleName,
* }}}
*/
def initialParticipants: Int
- require(initialParticipants > 0,
- "initialParticipants must be a 'def' or early initializer, and it must be greater zero")
+ require(
+ initialParticipants > 0,
+ "initialParticipants must be a 'def' or early initializer, and it must be greater zero")
require(initialParticipants <= maxNodes, "not enough nodes to run this test")
/**
@@ -390,8 +393,9 @@ abstract class MultiNodeSpec(val myself: RoleName,
* the innermost enclosing `within` block or the default `BarrierTimeout`
*/
def enterBarrier(name: String*): Unit =
- testConductor.enter(Timeout.durationToTimeout(remainingOr(testConductor.Settings.BarrierTimeout.duration)),
- name.to(immutable.Seq))
+ testConductor.enter(
+ Timeout.durationToTimeout(remainingOr(testConductor.Settings.BarrierTimeout.duration)),
+ name.to(immutable.Seq))
/**
* Query the controller for the transport address of the given node (by role name) and
diff --git a/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala b/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala
index a27855c030..4826967ebf 100644
--- a/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala
+++ b/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala
@@ -72,12 +72,14 @@ class DefaultOSGiLogger extends DefaultLogger {
def logMessage(logService: LogService, event: LogEvent): Unit = {
event match {
case error: Logging.Error if error.cause != NoCause =>
- logService.log(event.level.asInt,
- messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message),
- error.cause)
+ logService.log(
+ event.level.asInt,
+ messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message),
+ error.cause)
case _ =>
- logService.log(event.level.asInt,
- messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message))
+ logService.log(
+ event.level.asInt,
+ messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message))
}
}
diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala
index 01e297935c..b902b26122 100644
--- a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala
+++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala
@@ -12,9 +12,10 @@ import org.osgi.framework.BundleContext
* Factory class to create ActorSystem implementations in an OSGi environment. This mainly involves dealing with
* bundle classloaders appropriately to ensure that configuration files and classes get loaded properly
*/
-class OsgiActorSystemFactory(val context: BundleContext,
- val fallbackClassLoader: Option[ClassLoader],
- config: Config = ConfigFactory.empty) {
+class OsgiActorSystemFactory(
+ val context: BundleContext,
+ val fallbackClassLoader: Option[ClassLoader],
+ config: Config = ConfigFactory.empty) {
/*
* Classloader that delegates to the bundle for which the factory is creating an ActorSystem
diff --git a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala
index 908b89450f..5c5ac7ff26 100644
--- a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala
+++ b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala
@@ -150,9 +150,10 @@ class BundleDescriptorBuilder(name: String) {
*/
def build: BundleDescriptor = {
val file: File = tinybundleToJarFile(name)
- new BundleDescriptor(getClass().getClassLoader(),
- new URL("jar:" + file.toURI().toString() + "!/"),
- extractHeaders(file))
+ new BundleDescriptor(
+ getClass().getClassLoader(),
+ new URL("jar:" + file.toURI().toString() + "!/"),
+ extractHeaders(file))
}
def extractHeaders(file: File): HashMap[String, String] = {
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala
index b26d99d33f..172d389fc5 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala
@@ -62,9 +62,10 @@ class PersistenceQuery(system: ExtendedActorSystem)
* Java API: Returns the [[akka.persistence.query.javadsl.ReadJournal]] specified by the given
* read journal configuration entry.
*/
- final def getReadJournalFor[T <: javadsl.ReadJournal](clazz: Class[T],
- readJournalPluginId: String,
- readJournalPluginConfig: Config): T =
+ final def getReadJournalFor[T <: javadsl.ReadJournal](
+ clazz: Class[T],
+ readJournalPluginId: String,
+ readJournalPluginConfig: Config): T =
pluginFor(readJournalPluginId, readJournalPluginConfig).javadslPlugin.asInstanceOf[T]
final def getReadJournalFor[T <: javadsl.ReadJournal](clazz: Class[T], readJournalPluginId: String): T =
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala
index cad6d56395..ca7275d222 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala
@@ -19,8 +19,9 @@ trait CurrentEventsByPersistenceIdQuery extends ReadJournal {
* the "result set". Events that are stored after the query is completed are
* not included in the event stream.
*/
- def currentEventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long): Source[EventEnvelope, NotUsed]
+ def currentEventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long): Source[EventEnvelope, NotUsed]
}
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala
index f1cc95650f..9ac5988875 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala
@@ -26,8 +26,9 @@ trait EventsByPersistenceIdQuery extends ReadJournal {
* Corresponding query that is completed when it reaches the end of the currently
* stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]].
*/
- def eventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long): Source[EventEnvelope, NotUsed]
+ def eventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long): Source[EventEnvelope, NotUsed]
}
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala
index c07f31c714..115f6d1467 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala
@@ -18,28 +18,31 @@ import akka.persistence.query.{ EventEnvelope, Sequence }
* INTERNAL API
*/
private[akka] object EventsByPersistenceIdPublisher {
- def props(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long,
- refreshInterval: Option[FiniteDuration],
- maxBufSize: Int,
- writeJournalPluginId: String): Props = {
+ def props(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long,
+ refreshInterval: Option[FiniteDuration],
+ maxBufSize: Int,
+ writeJournalPluginId: String): Props = {
refreshInterval match {
case Some(interval) =>
Props(
- new LiveEventsByPersistenceIdPublisher(persistenceId,
- fromSequenceNr,
- toSequenceNr,
- interval,
- maxBufSize,
- writeJournalPluginId))
+ new LiveEventsByPersistenceIdPublisher(
+ persistenceId,
+ fromSequenceNr,
+ toSequenceNr,
+ interval,
+ maxBufSize,
+ writeJournalPluginId))
case None =>
Props(
- new CurrentEventsByPersistenceIdPublisher(persistenceId,
- fromSequenceNr,
- toSequenceNr,
- maxBufSize,
- writeJournalPluginId))
+ new CurrentEventsByPersistenceIdPublisher(
+ persistenceId,
+ fromSequenceNr,
+ toSequenceNr,
+ maxBufSize,
+ writeJournalPluginId))
}
}
@@ -53,10 +56,11 @@ private[akka] object EventsByPersistenceIdPublisher {
* INTERNAL API
*/
// FIXME needs a be rewritten as a GraphStage (since 2.5.0)
-private[akka] abstract class AbstractEventsByPersistenceIdPublisher(val persistenceId: String,
- val fromSequenceNr: Long,
- val maxBufSize: Int,
- val writeJournalPluginId: String)
+private[akka] abstract class AbstractEventsByPersistenceIdPublisher(
+ val persistenceId: String,
+ val fromSequenceNr: Long,
+ val maxBufSize: Int,
+ val writeJournalPluginId: String)
extends ActorPublisher[EventEnvelope]
with DeliveryBuffer[EventEnvelope]
with ActorLogging {
@@ -97,21 +101,23 @@ private[akka] abstract class AbstractEventsByPersistenceIdPublisher(val persiste
def replay(): Unit = {
val limit = maxBufSize - buf.size
- log.debug("request replay for persistenceId [{}] from [{}] to [{}] limit [{}]",
- persistenceId,
- currSeqNo,
- toSequenceNr,
- limit)
+ log.debug(
+ "request replay for persistenceId [{}] from [{}] to [{}] limit [{}]",
+ persistenceId,
+ currSeqNo,
+ toSequenceNr,
+ limit)
journal ! ReplayMessages(currSeqNo, toSequenceNr, limit, persistenceId, self)
context.become(replaying(limit))
}
def replaying(limit: Int): Receive = {
case ReplayedMessage(p) =>
- buf :+= EventEnvelope(offset = Sequence(p.sequenceNr),
- persistenceId = persistenceId,
- sequenceNr = p.sequenceNr,
- event = p.payload)
+ buf :+= EventEnvelope(
+ offset = Sequence(p.sequenceNr),
+ persistenceId = persistenceId,
+ sequenceNr = p.sequenceNr,
+ event = p.payload)
currSeqNo = p.sequenceNr + 1
deliverBuf()
@@ -140,12 +146,13 @@ private[akka] abstract class AbstractEventsByPersistenceIdPublisher(val persiste
* INTERNAL API
*/
// FIXME needs a be rewritten as a GraphStage (since 2.5.0)
-private[akka] class LiveEventsByPersistenceIdPublisher(persistenceId: String,
- fromSequenceNr: Long,
- override val toSequenceNr: Long,
- refreshInterval: FiniteDuration,
- maxBufSize: Int,
- writeJournalPluginId: String)
+private[akka] class LiveEventsByPersistenceIdPublisher(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ override val toSequenceNr: Long,
+ refreshInterval: FiniteDuration,
+ maxBufSize: Int,
+ writeJournalPluginId: String)
extends AbstractEventsByPersistenceIdPublisher(persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) {
import EventsByPersistenceIdPublisher._
@@ -178,11 +185,12 @@ private[akka] class LiveEventsByPersistenceIdPublisher(persistenceId: String,
/**
* INTERNAL API
*/
-private[akka] class CurrentEventsByPersistenceIdPublisher(persistenceId: String,
- fromSequenceNr: Long,
- var toSeqNr: Long,
- maxBufSize: Int,
- writeJournalPluginId: String)
+private[akka] class CurrentEventsByPersistenceIdPublisher(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ var toSeqNr: Long,
+ maxBufSize: Int,
+ writeJournalPluginId: String)
extends AbstractEventsByPersistenceIdPublisher(persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) {
import EventsByPersistenceIdPublisher._
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala
index 1886b7f38b..a5193d23bc 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala
@@ -20,12 +20,13 @@ import akka.persistence.journal.leveldb.LeveldbJournal.ReplayedTaggedMessage
* INTERNAL API
*/
private[akka] object EventsByTagPublisher {
- def props(tag: String,
- fromOffset: Long,
- toOffset: Long,
- refreshInterval: Option[FiniteDuration],
- maxBufSize: Int,
- writeJournalPluginId: String): Props = {
+ def props(
+ tag: String,
+ fromOffset: Long,
+ toOffset: Long,
+ refreshInterval: Option[FiniteDuration],
+ maxBufSize: Int,
+ writeJournalPluginId: String): Props = {
refreshInterval match {
case Some(interval) =>
Props(new LiveEventsByTagPublisher(tag, fromOffset, toOffset, interval, maxBufSize, writeJournalPluginId))
@@ -44,10 +45,11 @@ private[akka] object EventsByTagPublisher {
* INTERNAL API
*/
// FIXME needs a be rewritten as a GraphStage
-private[akka] abstract class AbstractEventsByTagPublisher(val tag: String,
- val fromOffset: Long,
- val maxBufSize: Int,
- val writeJournalPluginId: String)
+private[akka] abstract class AbstractEventsByTagPublisher(
+ val tag: String,
+ val fromOffset: Long,
+ val maxBufSize: Int,
+ val writeJournalPluginId: String)
extends ActorPublisher[EventEnvelope]
with DeliveryBuffer[EventEnvelope]
with ActorLogging {
@@ -95,10 +97,11 @@ private[akka] abstract class AbstractEventsByTagPublisher(val tag: String,
def replaying(limit: Int): Receive = {
case ReplayedTaggedMessage(p, _, offset) =>
- buf :+= EventEnvelope(offset = Sequence(offset),
- persistenceId = p.persistenceId,
- sequenceNr = p.sequenceNr,
- event = p.payload)
+ buf :+= EventEnvelope(
+ offset = Sequence(offset),
+ persistenceId = p.persistenceId,
+ sequenceNr = p.sequenceNr,
+ event = p.payload)
currOffset = offset
deliverBuf()
@@ -127,12 +130,13 @@ private[akka] abstract class AbstractEventsByTagPublisher(val tag: String,
* INTERNAL API
*/
// FIXME needs a be rewritten as a GraphStage (since 2.5.0)
-private[akka] class LiveEventsByTagPublisher(tag: String,
- fromOffset: Long,
- override val toOffset: Long,
- refreshInterval: FiniteDuration,
- maxBufSize: Int,
- writeJournalPluginId: String)
+private[akka] class LiveEventsByTagPublisher(
+ tag: String,
+ fromOffset: Long,
+ override val toOffset: Long,
+ refreshInterval: FiniteDuration,
+ maxBufSize: Int,
+ writeJournalPluginId: String)
extends AbstractEventsByTagPublisher(tag, fromOffset, maxBufSize, writeJournalPluginId) {
import EventsByTagPublisher._
@@ -166,11 +170,12 @@ private[akka] class LiveEventsByTagPublisher(tag: String,
* INTERNAL API
*/
// FIXME needs a be rewritten as a GraphStage (since 2.5.0)
-private[akka] class CurrentEventsByTagPublisher(tag: String,
- fromOffset: Long,
- var _toOffset: Long,
- maxBufSize: Int,
- writeJournalPluginId: String)
+private[akka] class CurrentEventsByTagPublisher(
+ tag: String,
+ fromOffset: Long,
+ var _toOffset: Long,
+ maxBufSize: Int,
+ writeJournalPluginId: String)
extends AbstractEventsByTagPublisher(tag, fromOffset, maxBufSize, writeJournalPluginId) {
import EventsByTagPublisher._
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala
index 4a0128ef1f..3ab737e4c1 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala
@@ -89,9 +89,10 @@ class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.lev
* The stream is completed with failure if there is a failure in executing the query in the
* backend journal.
*/
- override def eventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long): Source[EventEnvelope, NotUsed] =
+ override def eventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long): Source[EventEnvelope, NotUsed] =
scaladslReadJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
/**
@@ -99,9 +100,10 @@ class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.lev
* is completed immediately when it reaches the end of the "result set". Events that are
* stored after the query is completed are not included in the event stream.
*/
- override def currentEventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long): Source[EventEnvelope, NotUsed] =
+ override def currentEventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long): Source[EventEnvelope, NotUsed] =
scaladslReadJournal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
/**
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala
index 1479ea5ee0..921688e22d 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala
@@ -113,9 +113,10 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config)
* The stream is completed with failure if there is a failure in executing the query in the
* backend journal.
*/
- override def eventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long = 0L,
- toSequenceNr: Long = Long.MaxValue): Source[EventEnvelope, NotUsed] = {
+ override def eventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long = 0L,
+ toSequenceNr: Long = Long.MaxValue): Source[EventEnvelope, NotUsed] = {
Source
.actorPublisher[EventEnvelope](EventsByPersistenceIdPublisher
.props(persistenceId, fromSequenceNr, toSequenceNr, refreshInterval, maxBufSize, writeJournalPluginId))
@@ -128,9 +129,10 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config)
* is completed immediately when it reaches the end of the "result set". Events that are
* stored after the query is completed are not included in the event stream.
*/
- override def currentEventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long = 0L,
- toSequenceNr: Long = Long.MaxValue): Source[EventEnvelope, NotUsed] = {
+ override def currentEventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long = 0L,
+ toSequenceNr: Long = Long.MaxValue): Source[EventEnvelope, NotUsed] = {
Source
.actorPublisher[EventEnvelope](EventsByPersistenceIdPublisher
.props(persistenceId, fromSequenceNr, toSequenceNr, None, maxBufSize, writeJournalPluginId))
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala
index aad344e388..750ac0f77d 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala
@@ -19,8 +19,9 @@ trait CurrentEventsByPersistenceIdQuery extends ReadJournal {
* the "result set". Events that are stored after the query is completed are
* not included in the event stream.
*/
- def currentEventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long): Source[EventEnvelope, NotUsed]
+ def currentEventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long): Source[EventEnvelope, NotUsed]
}
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala
index f34d4b5501..13e12da6e6 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala
@@ -26,8 +26,9 @@ trait EventsByPersistenceIdQuery extends ReadJournal {
* Corresponding query that is completed when it reaches the end of the currently
* stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]].
*/
- def eventsByPersistenceId(persistenceId: String,
- fromSequenceNr: Long,
- toSequenceNr: Long): Source[EventEnvelope, NotUsed]
+ def eventsByPersistenceId(
+ persistenceId: String,
+ fromSequenceNr: Long,
+ toSequenceNr: Long): Source[EventEnvelope, NotUsed]
}
diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala
index 49d717bd71..e149d3f63b 100644
--- a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala
+++ b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala
@@ -10,9 +10,10 @@ import org.apache.commons.io.FileUtils
trait Cleanup { this: AkkaSpec =>
val storageLocations =
- List("akka.persistence.journal.leveldb.dir",
- "akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
+ List(
+ "akka.persistence.journal.leveldb.dir",
+ "akka.persistence.journal.leveldb-shared.store.dir",
+ "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
override protected def atStartup(): Unit = {
storageLocations.foreach(FileUtils.deleteDirectory)
diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala
index b2256e9b6a..8c8f8c3f08 100644
--- a/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala
+++ b/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala
@@ -276,9 +276,10 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(customSerializers) {
}
"handle a few unconfirmed" in {
- val unconfirmed = Vector(UnconfirmedDelivery(deliveryId = 1, destination = testActor.path, "a"),
- UnconfirmedDelivery(deliveryId = 2, destination = testActor.path, "b"),
- UnconfirmedDelivery(deliveryId = 3, destination = testActor.path, 42))
+ val unconfirmed = Vector(
+ UnconfirmedDelivery(deliveryId = 1, destination = testActor.path, "a"),
+ UnconfirmedDelivery(deliveryId = 2, destination = testActor.path, "b"),
+ UnconfirmedDelivery(deliveryId = 3, destination = testActor.path, 42))
val snap = AtLeastOnceDeliverySnapshot(17, unconfirmed)
val serializer = serialization.findSerializerFor(snap)
diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala
index bb9ef95a04..f2b6a6ffc0 100644
--- a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala
+++ b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala
@@ -85,11 +85,12 @@ abstract class JournalSpec(config: Config)
def writeMessages(fromSnr: Int, toSnr: Int, pid: String, sender: ActorRef, writerUuid: String): Unit = {
def persistentRepr(sequenceNr: Long) =
- PersistentRepr(payload = s"a-$sequenceNr",
- sequenceNr = sequenceNr,
- persistenceId = pid,
- sender = sender,
- writerUuid = writerUuid)
+ PersistentRepr(
+ payload = s"a-$sequenceNr",
+ sequenceNr = sequenceNr,
+ persistenceId = pid,
+ sender = sender,
+ writerUuid = writerUuid)
val msgs =
if (supportsAtomicPersistAllOfSeveralEvents) {
@@ -248,11 +249,12 @@ abstract class JournalSpec(config: Config)
val msgs = (6 to 8).map { i =>
val event = if (i == 7) notSerializableEvent else s"b-$i"
AtomicWrite(
- PersistentRepr(payload = event,
- sequenceNr = i,
- persistenceId = pid,
- sender = Actor.noSender,
- writerUuid = writerUuid))
+ PersistentRepr(
+ payload = event,
+ sequenceNr = i,
+ persistenceId = pid,
+ sender = Actor.noSender,
+ writerUuid = writerUuid))
}
val probe = TestProbe()
@@ -282,11 +284,12 @@ abstract class JournalSpec(config: Config)
val event = TestPayload(probe.ref)
val aw =
AtomicWrite(
- PersistentRepr(payload = event,
- sequenceNr = 6L,
- persistenceId = pid,
- sender = Actor.noSender,
- writerUuid = writerUuid))
+ PersistentRepr(
+ payload = event,
+ sequenceNr = 6L,
+ persistenceId = pid,
+ sender = Actor.noSender,
+ writerUuid = writerUuid))
journal ! WriteMessages(List(aw), probe.ref, actorInstanceId)
diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala
index 84ae83fb44..c3f553e72c 100644
--- a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala
+++ b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala
@@ -81,8 +81,9 @@ abstract class SnapshotStoreSpec(config: Config)
senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue))
}
"not load a snapshot given non-matching timestamp criteria" in {
- snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest.copy(maxTimestamp = 100), Long.MaxValue),
- senderProbe.ref)
+ snapshotStore.tell(
+ LoadSnapshot(pid, SnapshotSelectionCriteria.Latest.copy(maxTimestamp = 100), Long.MaxValue),
+ senderProbe.ref)
senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue))
}
"not load a snapshot given non-matching sequence number criteria" in {
@@ -102,8 +103,9 @@ abstract class SnapshotStoreSpec(config: Config)
senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata(2), s"s-3")), 13))
}
"load the most recent snapshot matching upper sequence number and timestamp bounds" in {
- snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(13, metadata(2).timestamp), Long.MaxValue),
- senderProbe.ref)
+ snapshotStore.tell(
+ LoadSnapshot(pid, SnapshotSelectionCriteria(13, metadata(2).timestamp), Long.MaxValue),
+ senderProbe.ref)
senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata(2), s"s-3")), Long.MaxValue))
snapshotStore.tell(
LoadSnapshot(pid, SnapshotSelectionCriteria.Latest.copy(maxTimestamp = metadata(2).timestamp), 13),
@@ -134,8 +136,9 @@ abstract class SnapshotStoreSpec(config: Config)
sub.expectMsg(cmd)
senderProbe.expectMsg(DeleteSnapshotsSuccess(criteria))
- snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(md.sequenceNr, md.timestamp), Long.MaxValue),
- senderProbe.ref)
+ snapshotStore.tell(
+ LoadSnapshot(pid, SnapshotSelectionCriteria(md.sequenceNr, md.timestamp), Long.MaxValue),
+ senderProbe.ref)
senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue))
snapshotStore.tell(
LoadSnapshot(pid, SnapshotSelectionCriteria(metadata(3).sequenceNr, metadata(3).timestamp), Long.MaxValue),
diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala
index 5f75d360e4..53fd6d97ad 100644
--- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala
+++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala
@@ -8,10 +8,8 @@ import akka.persistence.journal.JournalSpec
import akka.persistence.{ PersistenceSpec, PluginCleanup }
class LeveldbJournalJavaSpec
- extends JournalSpec(
- config = PersistenceSpec.config("leveldb",
- "LeveldbJournalJavaSpec",
- extraConfig = Some("akka.persistence.journal.leveldb.native = off")))
+ extends JournalSpec(config = PersistenceSpec
+ .config("leveldb", "LeveldbJournalJavaSpec", extraConfig = Some("akka.persistence.journal.leveldb.native = off")))
with PluginCleanup {
override def supportsRejectingNonSerializableObjects = true
diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala
index 2e186fb9ef..046a7a5db7 100644
--- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala
+++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala
@@ -11,9 +11,10 @@ import org.scalatest.DoNotDiscover
@DoNotDiscover // because only checking that compilation is OK with JournalPerfSpec
class LeveldbJournalNativePerfSpec
extends JournalPerfSpec(
- config = PersistenceSpec.config("leveldb",
- "LeveldbJournalNativePerfSpec",
- extraConfig = Some("akka.persistence.journal.leveldb.native = on")))
+ config = PersistenceSpec.config(
+ "leveldb",
+ "LeveldbJournalNativePerfSpec",
+ extraConfig = Some("akka.persistence.journal.leveldb.native = on")))
with PluginCleanup {
override def supportsRejectingNonSerializableObjects = true
diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala
index e2f0400213..5f6c382274 100644
--- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala
+++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala
@@ -9,9 +9,10 @@ import akka.persistence.{ PersistenceSpec, PluginCleanup }
class LeveldbJournalNativeSpec
extends JournalSpec(
- config = PersistenceSpec.config("leveldb",
- "LeveldbJournalNativeSpec",
- extraConfig = Some("akka.persistence.journal.leveldb.native = on")))
+ config = PersistenceSpec.config(
+ "leveldb",
+ "LeveldbJournalNativeSpec",
+ extraConfig = Some("akka.persistence.journal.leveldb.native = on")))
with PluginCleanup {
override def supportsRejectingNonSerializableObjects = true
diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala
index d65f766e1d..c1163e0a94 100644
--- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala
+++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala
@@ -9,9 +9,10 @@ import akka.persistence.{ PersistenceSpec, PluginCleanup }
class LeveldbJournalNoAtomicPersistMultipleEventsSpec
extends JournalSpec(
- config = PersistenceSpec.config("leveldb",
- "LeveldbJournalNoAtomicPersistMultipleEventsSpec",
- extraConfig = Some("akka.persistence.journal.leveldb.native = off")))
+ config = PersistenceSpec.config(
+ "leveldb",
+ "LeveldbJournalNoAtomicPersistMultipleEventsSpec",
+ extraConfig = Some("akka.persistence.journal.leveldb.native = off")))
with PluginCleanup {
/**
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala
index 0c38c3adad..273683b9e7 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala
@@ -26,8 +26,9 @@ private[akka] class Callback[State](val sideEffect: State => Unit) extends SideE
/** INTERNAL API */
@InternalApi
-final private[akka] class ReplyEffectImpl[ReplyMessage, State](replyTo: ActorRef[ReplyMessage],
- replyWithMessage: State => ReplyMessage)
+final private[akka] class ReplyEffectImpl[ReplyMessage, State](
+ replyTo: ActorRef[ReplyMessage],
+ replyWithMessage: State => ReplyMessage)
extends Callback[State](state => replyTo ! replyWithMessage(state)) {
override def toString: String = "Reply"
}
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala
index 1596d66aec..a2334d6e80 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala
@@ -23,20 +23,21 @@ import akka.util.OptionVal
* INTERNAL API: Carry state for the Persistent behavior implementation behaviors.
*/
@InternalApi
-private[akka] final class BehaviorSetup[C, E, S](val context: ActorContext[InternalProtocol],
- val persistenceId: PersistenceId,
- val emptyState: S,
- val commandHandler: EventSourcedBehavior.CommandHandler[C, E, S],
- val eventHandler: EventSourcedBehavior.EventHandler[S, E],
- val writerIdentity: EventSourcedBehaviorImpl.WriterIdentity,
- private val signalHandler: PartialFunction[Signal, Unit],
- val tagger: E ⇒ Set[String],
- val eventAdapter: EventAdapter[E, _],
- val snapshotWhen: (S, E, Long) ⇒ Boolean,
- val recovery: Recovery,
- var holdingRecoveryPermit: Boolean,
- val settings: EventSourcedSettings,
- val stashState: StashState) {
+private[akka] final class BehaviorSetup[C, E, S](
+ val context: ActorContext[InternalProtocol],
+ val persistenceId: PersistenceId,
+ val emptyState: S,
+ val commandHandler: EventSourcedBehavior.CommandHandler[C, E, S],
+ val eventHandler: EventSourcedBehavior.EventHandler[S, E],
+ val writerIdentity: EventSourcedBehaviorImpl.WriterIdentity,
+ private val signalHandler: PartialFunction[Signal, Unit],
+ val tagger: E ⇒ Set[String],
+ val eventAdapter: EventAdapter[E, _],
+ val snapshotWhen: (S, E, Long) ⇒ Boolean,
+ val recovery: Recovery,
+ var holdingRecoveryPermit: Boolean,
+ val settings: EventSourcedSettings,
+ val stashState: StashState) {
import InternalProtocol.RecoveryTickEvent
import akka.actor.typed.scaladsl.adapter._
@@ -82,10 +83,11 @@ private[akka] final class BehaviorSetup[C, E, S](val context: ActorContext[Inter
context.system.scheduler
.scheduleOnce(settings.recoveryEventTimeout, context.self.toUntyped, RecoveryTickEvent(snapshot = true))
else
- context.system.scheduler.schedule(settings.recoveryEventTimeout,
- settings.recoveryEventTimeout,
- context.self.toUntyped,
- RecoveryTickEvent(snapshot = false))
+ context.system.scheduler.schedule(
+ settings.recoveryEventTimeout,
+ settings.recoveryEventTimeout,
+ context.self.toUntyped,
+ RecoveryTickEvent(snapshot = false))
recoveryTimer = OptionVal.Some(timer)
}
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala
index 3d7ae90692..a5682d2ebc 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala
@@ -39,15 +39,17 @@ private[akka] abstract class EffectImpl[+Event, State]
/** INTERNAL API */
@InternalApi
private[akka] object CompositeEffect {
- def apply[Event, State](effect: scaladsl.Effect[Event, State],
- sideEffects: SideEffect[State]): CompositeEffect[Event, State] =
+ def apply[Event, State](
+ effect: scaladsl.Effect[Event, State],
+ sideEffects: SideEffect[State]): CompositeEffect[Event, State] =
CompositeEffect[Event, State](effect, sideEffects :: Nil)
}
/** INTERNAL API */
@InternalApi
-private[akka] final case class CompositeEffect[Event, State](persistingEffect: scaladsl.Effect[Event, State],
- _sideEffects: immutable.Seq[SideEffect[State]])
+private[akka] final case class CompositeEffect[Event, State](
+ persistingEffect: scaladsl.Effect[Event, State],
+ _sideEffects: immutable.Seq[SideEffect[State]])
extends EffectImpl[Event, State] {
override val events: immutable.Seq[Event] = persistingEffect.events
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala
index cddf92f536..af2d5196e9 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala
@@ -87,35 +87,34 @@ private[akka] final case class EventSourcedBehaviorImpl[Command, Event, State](
Behaviors
.supervise {
Behaviors.setup[Command] { _ ⇒
- val eventSourcedSetup = new BehaviorSetup(ctx.asInstanceOf[ActorContext[InternalProtocol]],
- persistenceId,
- emptyState,
- commandHandler,
- eventHandler,
- WriterIdentity.newIdentity(),
- actualSignalHandler,
- tagger,
- eventAdapter,
- snapshotWhen,
- recovery,
- holdingRecoveryPermit = false,
- settings = settings,
- stashState = stashState)
+ val eventSourcedSetup = new BehaviorSetup(
+ ctx.asInstanceOf[ActorContext[InternalProtocol]],
+ persistenceId,
+ emptyState,
+ commandHandler,
+ eventHandler,
+ WriterIdentity.newIdentity(),
+ actualSignalHandler,
+ tagger,
+ eventAdapter,
+ snapshotWhen,
+ recovery,
+ holdingRecoveryPermit = false,
+ settings = settings,
+ stashState = stashState)
// needs to accept Any since we also can get messages from the journal
// not part of the protocol
val onStopInterceptor = new BehaviorInterceptor[Any, Any] {
import BehaviorInterceptor._
- def aroundReceive(ctx: typed.TypedActorContext[Any],
- msg: Any,
- target: ReceiveTarget[Any]): Behavior[Any] = {
+ def aroundReceive(ctx: typed.TypedActorContext[Any], msg: Any, target: ReceiveTarget[Any])
+ : Behavior[Any] = {
target(ctx, msg)
}
- def aroundSignal(ctx: typed.TypedActorContext[Any],
- signal: Signal,
- target: SignalTarget[Any]): Behavior[Any] = {
+ def aroundSignal(ctx: typed.TypedActorContext[Any], signal: Signal, target: SignalTarget[Any])
+ : Behavior[Any] = {
if (signal == PostStop) {
eventSourcedSetup.cancelRecoveryTimer()
// clear stash to be GC friendly
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala
index 06381f9acb..e76280154c 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala
@@ -40,12 +40,13 @@ import com.typesafe.config.Config
val recoveryEventTimeout: FiniteDuration =
journalConfig.getDuration("recovery-event-timeout", TimeUnit.MILLISECONDS).millis
- EventSourcedSettings(stashCapacity = stashCapacity,
- stashOverflowStrategy,
- logOnStashing = logOnStashing,
- recoveryEventTimeout,
- journalPluginId,
- snapshotPluginId)
+ EventSourcedSettings(
+ stashCapacity = stashCapacity,
+ stashOverflowStrategy,
+ logOnStashing = logOnStashing,
+ recoveryEventTimeout,
+ journalPluginId,
+ snapshotPluginId)
}
private[akka] final def journalConfigFor(config: Config, journalPluginId: String): Config = {
@@ -60,16 +61,18 @@ import com.typesafe.config.Config
* INTERNAL API
*/
@InternalApi
-private[akka] final case class EventSourcedSettings(stashCapacity: Int,
- stashOverflowStrategy: StashOverflowStrategy,
- logOnStashing: Boolean,
- recoveryEventTimeout: FiniteDuration,
- journalPluginId: String,
- snapshotPluginId: String) {
+private[akka] final case class EventSourcedSettings(
+ stashCapacity: Int,
+ stashOverflowStrategy: StashOverflowStrategy,
+ logOnStashing: Boolean,
+ recoveryEventTimeout: FiniteDuration,
+ journalPluginId: String,
+ snapshotPluginId: String) {
require(journalPluginId != null, "journal plugin id must not be null; use empty string for 'default' journal")
- require(snapshotPluginId != null,
- "snapshot plugin id must not be null; use empty string for 'default' snapshot store")
+ require(
+ snapshotPluginId != null,
+ "snapshot plugin id must not be null; use empty string for 'default' snapshot store")
}
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala
index b0ad200b0f..e1158c29c8 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala
@@ -33,11 +33,12 @@ private[akka] trait JournalInteractions[C, E, S] {
val newState = state.nextSequenceNr()
val senderNotKnownBecauseAkkaTyped = null
- val repr = PersistentRepr(event,
- persistenceId = setup.persistenceId.id,
- sequenceNr = newState.seqNr,
- writerUuid = setup.writerIdentity.writerUuid,
- sender = senderNotKnownBecauseAkkaTyped)
+ val repr = PersistentRepr(
+ event,
+ persistenceId = setup.persistenceId.id,
+ sequenceNr = newState.seqNr,
+ writerUuid = setup.writerIdentity.writerUuid,
+ sender = senderNotKnownBecauseAkkaTyped)
val write = AtomicWrite(repr) :: Nil
setup.journal
@@ -46,18 +47,20 @@ private[akka] trait JournalInteractions[C, E, S] {
newState
}
- protected def internalPersistAll(events: immutable.Seq[EventOrTagged],
- state: Running.RunningState[S]): Running.RunningState[S] = {
+ protected def internalPersistAll(
+ events: immutable.Seq[EventOrTagged],
+ state: Running.RunningState[S]): Running.RunningState[S] = {
if (events.nonEmpty) {
var newState = state
val writes = events.map { event =>
newState = newState.nextSequenceNr()
- PersistentRepr(event,
- persistenceId = setup.persistenceId.id,
- sequenceNr = newState.seqNr,
- writerUuid = setup.writerIdentity.writerUuid,
- sender = ActorRef.noSender)
+ PersistentRepr(
+ event,
+ persistenceId = setup.persistenceId.id,
+ sequenceNr = newState.seqNr,
+ writerUuid = setup.writerIdentity.writerUuid,
+ sender = ActorRef.noSender)
}
val write = AtomicWrite(writes)
@@ -71,11 +74,12 @@ private[akka] trait JournalInteractions[C, E, S] {
protected def replayEvents(fromSeqNr: Long, toSeqNr: Long): Unit = {
setup.log.debug("Replaying messages: from: {}, to: {}", fromSeqNr, toSeqNr)
- setup.journal ! ReplayMessages(fromSeqNr,
- toSeqNr,
- setup.recovery.replayMax,
- setup.persistenceId.id,
- setup.selfUntyped)
+ setup.journal ! ReplayMessages(
+ fromSeqNr,
+ toSeqNr,
+ setup.recovery.replayMax,
+ setup.persistenceId.id,
+ setup.selfUntyped)
}
protected def requestRecoveryPermit(): Unit = {
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala
index c71930a00a..b6fc2c6279 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala
@@ -39,11 +39,12 @@ import akka.persistence.typed.internal.Running.WithSeqNrAccessible
private[akka] object ReplayingEvents {
@InternalApi
- private[akka] final case class ReplayingState[State](seqNr: Long,
- state: State,
- eventSeenInInterval: Boolean,
- toSeqNr: Long,
- receivedPoisonPill: Boolean)
+ private[akka] final case class ReplayingState[State](
+ seqNr: Long,
+ state: State,
+ eventSeenInInterval: Boolean,
+ toSeqNr: Long,
+ receivedPoisonPill: Boolean)
def apply[C, E, S](setup: BehaviorSetup[C, E, S], state: ReplayingState[S]): Behavior[InternalProtocol] =
Behaviors.setup { ctx =>
@@ -55,8 +56,9 @@ private[akka] object ReplayingEvents {
}
@InternalApi
-private[akka] final class ReplayingEvents[C, E, S](override val setup: BehaviorSetup[C, E, S],
- var state: ReplayingState[S])
+private[akka] final class ReplayingEvents[C, E, S](
+ override val setup: BehaviorSetup[C, E, S],
+ var state: ReplayingState[S])
extends AbstractBehavior[InternalProtocol]
with JournalInteractions[C, E, S]
with StashManagement[C, E, S]
@@ -90,9 +92,10 @@ private[akka] final class ReplayingEvents[C, E, S](override val setup: BehaviorS
val event = setup.eventAdapter.fromJournal(repr.payload.asInstanceOf[setup.eventAdapter.Per])
try {
- state = state.copy(seqNr = repr.sequenceNr,
- state = setup.eventHandler(state.state, event),
- eventSeenInInterval = true)
+ state = state.copy(
+ seqNr = repr.sequenceNr,
+ state = setup.eventHandler(state.state, event),
+ eventSeenInInterval = true)
this
} catch {
case NonFatal(ex) => onRecoveryFailure(ex, repr.sequenceNr, Some(event))
@@ -156,9 +159,10 @@ private[akka] final class ReplayingEvents[C, E, S](override val setup: BehaviorS
* @param cause failure cause.
* @param message the message that was being processed when the exception was thrown
*/
- protected def onRecoveryFailure(cause: Throwable,
- sequenceNr: Long,
- message: Option[Any]): Behavior[InternalProtocol] = {
+ protected def onRecoveryFailure(
+ cause: Throwable,
+ sequenceNr: Long,
+ message: Option[Any]): Behavior[InternalProtocol] = {
try {
setup.onSignal(RecoveryFailed(cause))
} catch {
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala
index 8b6f32e4e3..ead6486afa 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala
@@ -102,13 +102,15 @@ private[akka] class ReplayingSnapshot[C, E, S](override val setup: BehaviorSetup
}
def onJournalResponse(response: JournalProtocol.Response): Behavior[InternalProtocol] = {
- setup.log.debug("Unexpected response from journal: [{}], may be due to an actor restart, ignoring...",
- response.getClass.getName)
+ setup.log.debug(
+ "Unexpected response from journal: [{}], may be due to an actor restart, ignoring...",
+ response.getClass.getName)
Behaviors.unhandled
}
- def onSnapshotterResponse(response: SnapshotProtocol.Response,
- receivedPoisonPill: Boolean): Behavior[InternalProtocol] = {
+ def onSnapshotterResponse(
+ response: SnapshotProtocol.Response,
+ receivedPoisonPill: Boolean): Behavior[InternalProtocol] = {
response match {
case LoadSnapshotResult(sso, toSnr) =>
var state: S = setup.emptyState
@@ -130,10 +132,11 @@ private[akka] class ReplayingSnapshot[C, E, S](override val setup: BehaviorSetup
}
}
- private def becomeReplayingEvents(state: S,
- lastSequenceNr: Long,
- toSnr: Long,
- receivedPoisonPill: Boolean): Behavior[InternalProtocol] = {
+ private def becomeReplayingEvents(
+ state: S,
+ lastSequenceNr: Long,
+ toSnr: Long,
+ receivedPoisonPill: Boolean): Behavior[InternalProtocol] = {
setup.cancelRecoveryTimer()
ReplayingEvents[C, E, S](
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala
index f87f855919..c76069c723 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala
@@ -110,15 +110,17 @@ private[akka] object Running {
applyEffects(cmd, state, effect.asInstanceOf[EffectImpl[E, S]]) // TODO can we avoid the cast?
}
- @tailrec def applyEffects(msg: Any,
- state: RunningState[S],
- effect: Effect[E, S],
- sideEffects: immutable.Seq[SideEffect[S]] = Nil): Behavior[InternalProtocol] = {
+ @tailrec def applyEffects(
+ msg: Any,
+ state: RunningState[S],
+ effect: Effect[E, S],
+ sideEffects: immutable.Seq[SideEffect[S]] = Nil): Behavior[InternalProtocol] = {
if (setup.log.isDebugEnabled && !effect.isInstanceOf[CompositeEffect[_, _]])
- setup.log.debug(s"Handled command [{}], resulting effect: [{}], side effects: [{}]",
- msg.getClass.getName,
- effect,
- sideEffects.size)
+ setup.log.debug(
+ s"Handled command [{}], resulting effect: [{}], side effects: [{}]",
+ msg.getClass.getName,
+ effect,
+ sideEffects.size)
effect match {
case CompositeEffect(eff, currentSideEffects) =>
@@ -194,19 +196,21 @@ private[akka] object Running {
// ===============================================
- def persistingEvents(state: RunningState[S],
- numberOfEvents: Int,
- shouldSnapshotAfterPersist: Boolean,
- sideEffects: immutable.Seq[SideEffect[S]]): Behavior[InternalProtocol] = {
+ def persistingEvents(
+ state: RunningState[S],
+ numberOfEvents: Int,
+ shouldSnapshotAfterPersist: Boolean,
+ sideEffects: immutable.Seq[SideEffect[S]]): Behavior[InternalProtocol] = {
setup.setMdc(persistingEventsMdc)
new PersistingEvents(state, numberOfEvents, shouldSnapshotAfterPersist, sideEffects)
}
/** INTERNAL API */
- @InternalApi private[akka] class PersistingEvents(var state: RunningState[S],
- numberOfEvents: Int,
- shouldSnapshotAfterPersist: Boolean,
- var sideEffects: immutable.Seq[SideEffect[S]])
+ @InternalApi private[akka] class PersistingEvents(
+ var state: RunningState[S],
+ numberOfEvents: Int,
+ shouldSnapshotAfterPersist: Boolean,
+ var sideEffects: immutable.Seq[SideEffect[S]])
extends AbstractBehavior[InternalProtocol]
with WithSeqNrAccessible {
@@ -366,9 +370,10 @@ private[akka] object Running {
behavior
}
- def applySideEffect(effect: SideEffect[S],
- state: RunningState[S],
- behavior: Behavior[InternalProtocol]): Behavior[InternalProtocol] = {
+ def applySideEffect(
+ effect: SideEffect[S],
+ state: RunningState[S],
+ behavior: Behavior[InternalProtocol]): Behavior[InternalProtocol] = {
effect match {
case _: Stop.type @unchecked =>
Behaviors.stopped
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala
index 9f5fca48e1..d450bebd9a 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala
@@ -97,23 +97,26 @@ private[akka] trait StashManagement[C, E, S] {
private def logStashMessage(msg: InternalProtocol, buffer: StashBuffer[InternalProtocol]): Unit = {
if (setup.settings.logOnStashing)
- setup.log.debug("Stashing message to {} stash: [{}] ",
- if (buffer eq stashState.internalStashBuffer) "internal" else "user",
- msg)
+ setup.log.debug(
+ "Stashing message to {} stash: [{}] ",
+ if (buffer eq stashState.internalStashBuffer) "internal" else "user",
+ msg)
}
private def logUnstashMessage(buffer: StashBuffer[InternalProtocol]): Unit = {
if (setup.settings.logOnStashing)
- setup.log.debug("Unstashing message from {} stash: [{}]",
- if (buffer eq stashState.internalStashBuffer) "internal" else "user",
- buffer.head)
+ setup.log.debug(
+ "Unstashing message from {} stash: [{}]",
+ if (buffer eq stashState.internalStashBuffer) "internal" else "user",
+ buffer.head)
}
private def logUnstashAll(): Unit = {
if (setup.settings.logOnStashing)
- setup.log.debug("Unstashing all [{}] messages from user stash, first is: [{}]",
- stashState.userStashBuffer.size,
- stashState.userStashBuffer.head)
+ setup.log.debug(
+ "Unstashing all [{}] messages from user stash, first is: [{}]",
+ stashState.userStashBuffer.size,
+ stashState.userStashBuffer.head)
}
}
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala
index 20eb5475ef..014b078ebe 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala
@@ -62,8 +62,9 @@ final class CommandHandlerBuilder[Command, Event, State]() {
*
* @return A new, mutable, CommandHandlerBuilderByState
*/
- def forState[S <: State](stateClass: Class[S],
- statePredicate: Predicate[S]): CommandHandlerBuilderByState[Command, Event, S, State] = {
+ def forState[S <: State](
+ stateClass: Class[S],
+ statePredicate: Predicate[S]): CommandHandlerBuilderByState[Command, Event, S, State] = {
val builder = new CommandHandlerBuilderByState[Command, Event, S, State](stateClass, statePredicate)
builders = builder.asInstanceOf[CommandHandlerBuilderByState[Command, Event, State, State]] :: builders
builder
@@ -196,15 +197,13 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int
private var cases: List[CommandHandlerCase[Command, Event, State]] = Nil
private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, Effect[Event, State]]): Unit = {
- cases = CommandHandlerCase[Command, Event, State](commandPredicate = predicate,
- statePredicate = state =>
- if (state == null) statePredicate.test(state.asInstanceOf[S])
- else
- statePredicate.test(state.asInstanceOf[S]) && stateClass
- .isAssignableFrom(state.getClass),
- handler.asInstanceOf[BiFunction[State,
- Command,
- Effect[Event, State]]]) :: cases
+ cases = CommandHandlerCase[Command, Event, State](
+ commandPredicate = predicate,
+ statePredicate = state =>
+ if (state == null) statePredicate.test(state.asInstanceOf[S])
+ else
+ statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass),
+ handler.asInstanceOf[BiFunction[State, Command, Effect[Event, State]]]) :: cases
}
/**
@@ -250,8 +249,9 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int
def onCommand[C <: Command](
commandClass: Class[C],
handler: BiFunction[S, C, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = {
- addCase(cmd => commandClass.isAssignableFrom(cmd.getClass),
- handler.asInstanceOf[BiFunction[S, Command, Effect[Event, State]]])
+ addCase(
+ cmd => commandClass.isAssignableFrom(cmd.getClass),
+ handler.asInstanceOf[BiFunction[S, Command, Effect[Event, State]]])
this
}
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala
index 14f8cb0fdb..8b3d42750a 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala
@@ -206,18 +206,16 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St
private var cases: List[CommandHandlerCase[Command, Event, State]] = Nil
- private def addCase(predicate: Command => Boolean,
- handler: BiFunction[S, Command, ReplyEffect[Event, State]]): Unit = {
- cases = CommandHandlerCase[Command, Event, State](commandPredicate = predicate,
- statePredicate = state =>
- if (state == null) statePredicate.test(state.asInstanceOf[S])
- else
- statePredicate.test(state.asInstanceOf[S]) && stateClass
- .isAssignableFrom(state.getClass),
- handler
- .asInstanceOf[BiFunction[State,
- Command,
- ReplyEffect[Event, State]]]) :: cases
+ private def addCase(
+ predicate: Command => Boolean,
+ handler: BiFunction[S, Command, ReplyEffect[Event, State]]): Unit = {
+ cases = CommandHandlerCase[Command, Event, State](
+ commandPredicate = predicate,
+ statePredicate = state =>
+ if (state == null) statePredicate.test(state.asInstanceOf[S])
+ else
+ statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass),
+ handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[Event, State]]]) :: cases
}
/**
@@ -260,8 +258,9 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St
*/
def onCommand[C <: Command](commandClass: Class[C], handler: BiFunction[S, C, ReplyEffect[Event, State]])
: CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = {
- addCase(cmd => commandClass.isAssignableFrom(cmd.getClass),
- handler.asInstanceOf[BiFunction[S, Command, ReplyEffect[Event, State]]])
+ addCase(
+ cmd => commandClass.isAssignableFrom(cmd.getClass),
+ handler.asInstanceOf[BiFunction[S, Command, ReplyEffect[Event, State]]])
this
}
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala
index 624a480380..c23956880f 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala
@@ -94,8 +94,9 @@ import akka.persistence.typed.internal._
* The reply message will be sent also if `withEnforcedReplies` isn't used, but then the compiler will not help
* finding mistakes.
*/
- def reply[ReplyMessage](cmd: ExpectingReply[ReplyMessage],
- replyWithMessage: ReplyMessage): ReplyEffect[Event, State] =
+ def reply[ReplyMessage](
+ cmd: ExpectingReply[ReplyMessage],
+ replyWithMessage: ReplyMessage): ReplyEffect[Event, State] =
none().thenReply[ReplyMessage](cmd, new function.Function[State, ReplyMessage] {
override def apply(param: State): ReplyMessage = replyWithMessage
})
@@ -168,8 +169,9 @@ import akka.persistence.typed.internal._
* The reply message will be sent also if `withEnforcedReplies` isn't used, but then the compiler will not help
* finding mistakes.
*/
- def thenReply[ReplyMessage](cmd: ExpectingReply[ReplyMessage],
- replyWithMessage: function.Function[State, ReplyMessage]): ReplyEffect[Event, State] =
+ def thenReply[ReplyMessage](
+ cmd: ExpectingReply[ReplyMessage],
+ replyWithMessage: function.Function[State, ReplyMessage]): ReplyEffect[Event, State] =
CompositeEffect(this, SideEffect[State](newState => cmd.replyTo ! replyWithMessage(newState)))
/**
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala
index 6c1359601d..2eaed9f4d6 100644
--- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala
+++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala
@@ -62,8 +62,9 @@ final class EventHandlerBuilder[State >: Null, Event]() {
*
* @return A new, mutable, EventHandlerBuilderByState
*/
- def forState[S <: State](stateClass: Class[S],
- statePredicate: Predicate[S]): EventHandlerBuilderByState[S, State, Event] = {
+ def forState[S <: State](
+ stateClass: Class[S],
+ statePredicate: Predicate[S]): EventHandlerBuilderByState[S, State, Event] = {
val builder = new EventHandlerBuilderByState[S, State, Event](stateClass, statePredicate)
builders = builder.asInstanceOf[EventHandlerBuilderByState[State, State, Event]] :: builders
builder
@@ -180,13 +181,15 @@ object EventHandlerBuilderByState {
/**
* INTERNAL API
*/
- @InternalApi private final case class EventHandlerCase[State, Event](statePredicate: State => Boolean,
- eventPredicate: Event => Boolean,
- handler: BiFunction[State, Event, State])
+ @InternalApi private final case class EventHandlerCase[State, Event](
+ statePredicate: State => Boolean,
+ eventPredicate: Event => Boolean,
+ handler: BiFunction[State, Event, State])
}
-final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private val stateClass: Class[S],
- private val statePredicate: Predicate[S]) {
+final class EventHandlerBuilderByState[S <: State, State >: Null, Event](
+ private val stateClass: Class[S],
+ private val statePredicate: Predicate[S]) {
import EventHandlerBuilderByState.EventHandlerCase
@@ -208,8 +211,9 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private
* and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap,
* otherwise you risk to 'shadow' part of your event handlers.
*/
- def onEvent[E <: Event](eventClass: Class[E],
- handler: BiFunction[S, E, State]): EventHandlerBuilderByState[S, State, Event] = {
+ def onEvent[E <: Event](
+ eventClass: Class[E],
+ handler: BiFunction[S, E, State]): EventHandlerBuilderByState[S, State, Event] = {
addCase(e => eventClass.isAssignableFrom(e.getClass), handler.asInstanceOf[BiFunction[State, Event, State]])
this
}
@@ -224,8 +228,9 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private
* and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap,
* otherwise you risk to 'shadow' part of your event handlers.
*/
- def onEvent[E <: Event](eventClass: Class[E],
- handler: JFunction[E, State]): EventHandlerBuilderByState[S, State, Event] = {
+ def onEvent[E <: Event](
+ eventClass: Class[E],
+ handler: JFunction[E, State]): EventHandlerBuilderByState[S, State, Event] = {
onEvent[E](eventClass, new BiFunction[S, E, State] {
override def apply(state: S, event: E): State = handler(event)
})
@@ -240,8 +245,9 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private
* and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap,
* otherwise you risk to 'shadow' part of your event handlers.
*/
- def onEvent[E <: Event](eventClass: Class[E],
- handler: Supplier[State]): EventHandlerBuilderByState[S, State, Event] = {
+ def onEvent[E <: Event](
+ eventClass: Class[E],
+ handler: Supplier[State]): EventHandlerBuilderByState[S, State, Event] = {
val supplierBiFunction = new BiFunction[S, E, State] {
def apply(t: S, u: E): State = handler.get()
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala
index 98b35a50d3..2984fa8e81 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala
@@ -22,17 +22,19 @@ object ManyRecoveriesSpec {
final case class Evt(s: String)
- def persistentBehavior(name: String,
- probe: TestProbe[String],
- latch: Option[TestLatch]): EventSourcedBehavior[Cmd, Evt, String] =
- EventSourcedBehavior[Cmd, Evt, String](persistenceId = PersistenceId(name),
- emptyState = "",
- commandHandler = CommandHandler.command {
- case Cmd(s) => Effect.persist(Evt(s)).thenRun(_ => probe.ref ! s"$name-$s")
- },
- eventHandler = {
- case (state, _) => latch.foreach(Await.ready(_, 10.seconds)); state
- })
+ def persistentBehavior(
+ name: String,
+ probe: TestProbe[String],
+ latch: Option[TestLatch]): EventSourcedBehavior[Cmd, Evt, String] =
+ EventSourcedBehavior[Cmd, Evt, String](
+ persistenceId = PersistenceId(name),
+ emptyState = "",
+ commandHandler = CommandHandler.command {
+ case Cmd(s) => Effect.persist(Evt(s)).thenRun(_ => probe.ref ! s"$name-$s")
+ },
+ eventHandler = {
+ case (state, _) => latch.foreach(Await.ready(_, 10.seconds)); state
+ })
def forwardBehavior(sender: TestProbe[String]): Behaviors.Receive[Int] =
Behaviors.receiveMessagePartial[Int] {
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala
index 0e2ef0179f..d29ea759ac 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala
@@ -40,19 +40,21 @@ object RecoveryPermitterSpec {
case object Recovered extends Event
- def persistentBehavior(name: String,
- commandProbe: TestProbe[Any],
- eventProbe: TestProbe[Any],
- throwOnRecovery: Boolean = false): Behavior[Command] =
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId(name),
- emptyState = EmptyState,
- commandHandler = CommandHandler.command {
- case StopActor => Effect.stop()
- case command => commandProbe.ref ! command; Effect.none
- },
- eventHandler = { (state, event) =>
- eventProbe.ref ! event; state
- }).receiveSignal {
+ def persistentBehavior(
+ name: String,
+ commandProbe: TestProbe[Any],
+ eventProbe: TestProbe[Any],
+ throwOnRecovery: Boolean = false): Behavior[Command] =
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId(name),
+ emptyState = EmptyState,
+ commandHandler = CommandHandler.command {
+ case StopActor => Effect.stop()
+ case command => commandProbe.ref ! command; Effect.none
+ },
+ eventHandler = { (state, event) =>
+ eventProbe.ref ! event; state
+ }).receiveSignal {
case RecoveryCompleted(state) =>
eventProbe.ref ! Recovered
if (throwOnRecovery) throw new TE
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala
index f8a267a11d..a6c45e9f81 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala
@@ -58,11 +58,12 @@ class StashStateSpec extends ScalaTestWithActorTestKit with WordSpecLike {
}
private def dummySettings(capacity: Int = 42) =
- EventSourcedSettings(stashCapacity = capacity,
- stashOverflowStrategy = StashOverflowStrategy.Fail,
- logOnStashing = false,
- recoveryEventTimeout = 3.seconds,
- journalPluginId = "",
- snapshotPluginId = "")
+ EventSourcedSettings(
+ stashCapacity = capacity,
+ stashOverflowStrategy = StashOverflowStrategy.Fail,
+ logOnStashing = false,
+ recoveryEventTimeout = 3.seconds,
+ journalPluginId = "",
+ snapshotPluginId = "")
}
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala
index b9fb323f03..9d39f26ec5 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala
@@ -80,22 +80,24 @@ class EventSourcedBehaviorFailureSpec
implicit val testSettings: TestKitSettings = TestKitSettings(system)
- def failingPersistentActor(pid: PersistenceId,
- probe: ActorRef[String],
- additionalSignalHandler: PartialFunction[Signal, Unit] = PartialFunction.empty)
+ def failingPersistentActor(
+ pid: PersistenceId,
+ probe: ActorRef[String],
+ additionalSignalHandler: PartialFunction[Signal, Unit] = PartialFunction.empty)
: EventSourcedBehavior[String, String, String] =
- EventSourcedBehavior[String, String, String](pid,
- "",
- (_, cmd) ⇒ {
- if (cmd == "wrong")
- throw new TestException("wrong command")
- probe.tell("persisting")
- Effect.persist(cmd)
- },
- (state, event) ⇒ {
- probe.tell(event)
- state + event
- })
+ EventSourcedBehavior[String, String, String](
+ pid,
+ "",
+ (_, cmd) ⇒ {
+ if (cmd == "wrong")
+ throw new TestException("wrong command")
+ probe.tell("persisting")
+ Effect.persist(cmd)
+ },
+ (state, event) ⇒ {
+ probe.tell(event)
+ state + event
+ })
.receiveSignal(additionalSignalHandler.orElse {
case RecoveryCompleted(_) ⇒
probe.tell("starting")
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala
index b7dce6c4bd..661610e439 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala
@@ -43,38 +43,33 @@ object EventSourcedBehaviorReplySpec {
def counter(persistenceId: PersistenceId): Behavior[Command[_]] =
Behaviors.setup(ctx => counter(ctx, persistenceId))
- def counter(ctx: ActorContext[Command[_]],
- persistenceId: PersistenceId): EventSourcedBehavior[Command[_], Event, State] = {
- EventSourcedBehavior.withEnforcedReplies[Command[_], Event, State](persistenceId,
- emptyState = State(0, Vector.empty),
- commandHandler = (state, command) =>
- command match {
+ def counter(
+ ctx: ActorContext[Command[_]],
+ persistenceId: PersistenceId): EventSourcedBehavior[Command[_], Event, State] = {
+ EventSourcedBehavior.withEnforcedReplies[Command[_], Event, State](
+ persistenceId,
+ emptyState = State(0, Vector.empty),
+ commandHandler = (state, command) =>
+ command match {
- case cmd: IncrementWithConfirmation =>
- Effect
- .persist(Incremented(1))
- .thenReply(cmd)(_ => Done)
+ case cmd: IncrementWithConfirmation =>
+ Effect.persist(Incremented(1)).thenReply(cmd)(_ => Done)
- case cmd: IncrementReplyLater =>
- Effect
- .persist(Incremented(1))
- .thenRun((_: State) =>
- ctx.self ! ReplyNow(cmd.replyTo))
- .thenNoReply()
+ case cmd: IncrementReplyLater =>
+ Effect.persist(Incremented(1)).thenRun((_: State) => ctx.self ! ReplyNow(cmd.replyTo)).thenNoReply()
- case cmd: ReplyNow =>
- Effect.reply(cmd)(Done)
+ case cmd: ReplyNow =>
+ Effect.reply(cmd)(Done)
- case query: GetValue =>
- Effect.reply(query)(state)
+ case query: GetValue =>
+ Effect.reply(query)(state)
- },
- eventHandler = (state, evt) =>
- evt match {
- case Incremented(delta) =>
- State(state.value + delta,
- state.history :+ state.value)
- })
+ },
+ eventHandler = (state, evt) =>
+ evt match {
+ case Incremented(delta) =>
+ State(state.value + delta, state.history :+ state.value)
+ })
}
}
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala
index 0c7fc3665a..172177a423 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala
@@ -129,20 +129,22 @@ object EventSourcedBehaviorSpec {
def counter(ctx: ActorContext[Command], persistenceId: PersistenceId)(
implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] =
- counter(ctx,
- persistenceId,
- loggingActor = TestProbe[String].ref,
- probe = TestProbe[(State, Event)].ref,
- TestProbe[Try[Done]].ref)
+ counter(
+ ctx,
+ persistenceId,
+ loggingActor = TestProbe[String].ref,
+ probe = TestProbe[(State, Event)].ref,
+ TestProbe[Try[Done]].ref)
def counter(ctx: ActorContext[Command], persistenceId: PersistenceId, logging: ActorRef[String])(
implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] =
counter(ctx, persistenceId, loggingActor = logging, probe = TestProbe[(State, Event)].ref, TestProbe[Try[Done]].ref)
- def counterWithProbe(ctx: ActorContext[Command],
- persistenceId: PersistenceId,
- probe: ActorRef[(State, Event)],
- snapshotProbe: ActorRef[Try[Done]])(
+ def counterWithProbe(
+ ctx: ActorContext[Command],
+ persistenceId: PersistenceId,
+ probe: ActorRef[(State, Event)],
+ snapshotProbe: ActorRef[Try[Done]])(
implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] =
counter(ctx, persistenceId, TestProbe[String].ref, probe, snapshotProbe)
@@ -154,111 +156,112 @@ object EventSourcedBehaviorSpec {
implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] =
counter(ctx, persistenceId, TestProbe[String].ref, TestProbe[(State, Event)].ref, snapshotProbe = probe)
- def counter(ctx: ActorContext[Command],
- persistenceId: PersistenceId,
- loggingActor: ActorRef[String],
- probe: ActorRef[(State, Event)],
- snapshotProbe: ActorRef[Try[Done]]): EventSourcedBehavior[Command, Event, State] = {
- EventSourcedBehavior[Command, Event, State](persistenceId,
- emptyState = State(0, Vector.empty),
- commandHandler = (state, cmd) =>
- cmd match {
- case Increment =>
- Effect.persist(Incremented(1))
+ def counter(
+ ctx: ActorContext[Command],
+ persistenceId: PersistenceId,
+ loggingActor: ActorRef[String],
+ probe: ActorRef[(State, Event)],
+ snapshotProbe: ActorRef[Try[Done]]): EventSourcedBehavior[Command, Event, State] = {
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId,
+ emptyState = State(0, Vector.empty),
+ commandHandler = (state, cmd) =>
+ cmd match {
+ case Increment =>
+ Effect.persist(Incremented(1))
- case IncrementThenLogThenStop =>
- Effect
- .persist(Incremented(1))
- .thenRun { (_: State) =>
- loggingActor ! firstLogging
- }
- .thenStop
+ case IncrementThenLogThenStop =>
+ Effect
+ .persist(Incremented(1))
+ .thenRun { (_: State) =>
+ loggingActor ! firstLogging
+ }
+ .thenStop
- case IncrementTwiceThenLogThenStop =>
- Effect
- .persist(Incremented(1), Incremented(2))
- .thenRun { (_: State) =>
- loggingActor ! firstLogging
- }
- .thenStop
+ case IncrementTwiceThenLogThenStop =>
+ Effect
+ .persist(Incremented(1), Incremented(2))
+ .thenRun { (_: State) =>
+ loggingActor ! firstLogging
+ }
+ .thenStop
- case IncrementWithPersistAll(n) =>
- Effect.persist((0 until n).map(_ => Incremented(1)))
+ case IncrementWithPersistAll(n) =>
+ Effect.persist((0 until n).map(_ => Incremented(1)))
- case cmd: IncrementWithConfirmation =>
- Effect.persist(Incremented(1)).thenReply(cmd)(_ => Done)
+ case cmd: IncrementWithConfirmation =>
+ Effect.persist(Incremented(1)).thenReply(cmd)(_ => Done)
- case GetValue(replyTo) =>
- replyTo ! state
- Effect.none
+ case GetValue(replyTo) =>
+ replyTo ! state
+ Effect.none
- case IncrementLater =>
- // purpose is to test signals
- val delay = ctx.spawnAnonymous(Behaviors.withTimers[Tick.type] {
- timers =>
- timers.startSingleTimer(Tick, Tick, 10.millis)
- Behaviors.receive((_, msg) =>
- msg match {
- case Tick => Behaviors.stopped
- })
- })
- ctx.watchWith(delay, DelayFinished)
- Effect.none
+ case IncrementLater =>
+ // purpose is to test signals
+ val delay = ctx.spawnAnonymous(Behaviors.withTimers[Tick.type] { timers =>
+ timers.startSingleTimer(Tick, Tick, 10.millis)
+ Behaviors.receive((_, msg) =>
+ msg match {
+ case Tick => Behaviors.stopped
+ })
+ })
+ ctx.watchWith(delay, DelayFinished)
+ Effect.none
- case DelayFinished =>
- Effect.persist(Incremented(10))
+ case DelayFinished =>
+ Effect.persist(Incremented(10))
- case IncrementAfterReceiveTimeout =>
- ctx.setReceiveTimeout(10.millis, Timeout)
- Effect.none
+ case IncrementAfterReceiveTimeout =>
+ ctx.setReceiveTimeout(10.millis, Timeout)
+ Effect.none
- case Timeout =>
- ctx.cancelReceiveTimeout()
- Effect.persist(Incremented(100))
+ case Timeout =>
+ ctx.cancelReceiveTimeout()
+ Effect.persist(Incremented(100))
- case IncrementTwiceAndThenLog =>
- Effect
- .persist(Incremented(1), Incremented(1))
- .thenRun { (_: State) =>
- loggingActor ! firstLogging
- }
- .thenRun { _ =>
- loggingActor ! secondLogging
- }
+ case IncrementTwiceAndThenLog =>
+ Effect
+ .persist(Incremented(1), Incremented(1))
+ .thenRun { (_: State) =>
+ loggingActor ! firstLogging
+ }
+ .thenRun { _ =>
+ loggingActor ! secondLogging
+ }
- case EmptyEventsListAndThenLog =>
- Effect
- .persist(List.empty) // send empty list of events
- .thenRun { _ =>
- loggingActor ! firstLogging
- }
+ case EmptyEventsListAndThenLog =>
+ Effect
+ .persist(List.empty) // send empty list of events
+ .thenRun { _ =>
+ loggingActor ! firstLogging
+ }
- case DoNothingAndThenLog =>
- Effect.none.thenRun { _ =>
- loggingActor ! firstLogging
- }
+ case DoNothingAndThenLog =>
+ Effect.none.thenRun { _ =>
+ loggingActor ! firstLogging
+ }
- case LogThenStop =>
- Effect
- .none[Event, State]
- .thenRun { _ =>
- loggingActor ! firstLogging
- }
- .thenStop
+ case LogThenStop =>
+ Effect
+ .none[Event, State]
+ .thenRun { _ =>
+ loggingActor ! firstLogging
+ }
+ .thenStop
- case Fail =>
- throw new TestException("boom!")
+ case Fail =>
+ throw new TestException("boom!")
- case StopIt =>
- Effect.none.thenStop()
+ case StopIt =>
+ Effect.none.thenStop()
- },
- eventHandler = (state, evt) ⇒
- evt match {
- case Incremented(delta) ⇒
- probe ! ((state, evt))
- State(state.value + delta, state.history :+ state.value)
- }).receiveSignal {
+ },
+ eventHandler = (state, evt) ⇒
+ evt match {
+ case Incremented(delta) ⇒
+ probe ! ((state, evt))
+ State(state.value + delta, state.history :+ state.value)
+ }).receiveSignal {
case RecoveryCompleted(_) ⇒ ()
case SnapshotCompleted(_) ⇒
snapshotProbe ! Success(Done)
@@ -606,8 +609,9 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh
replyProbe.expectMessage(State(2, Vector(0, 1)))
val events = queries.currentEventsByPersistenceId(pid.id).runWith(Sink.seq).futureValue
- events shouldEqual List(EventEnvelope(Sequence(1), pid.id, 1, Wrapper(Incremented(1))),
- EventEnvelope(Sequence(2), pid.id, 2, Wrapper(Incremented(1))))
+ events shouldEqual List(
+ EventEnvelope(Sequence(1), pid.id, 1, Wrapper(Incremented(1))),
+ EventEnvelope(Sequence(2), pid.id, 2, Wrapper(Incremented(1))))
val c2 = spawn(Behaviors.setup[Command](ctx => counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event])))
c2 ! GetValue(replyProbe.ref)
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala
index 987fc2d74f..47c848e6af 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala
@@ -77,26 +77,27 @@ object EventSourcedBehaviorStashSpec {
def eventSourcedCounter(persistenceId: PersistenceId): EventSourcedBehavior[Command[_], Event, State] = {
EventSourcedBehavior
- .withEnforcedReplies[Command[_], Event, State](persistenceId,
- emptyState = State(0, active = true),
- commandHandler = (state, command) => {
- if (state.active) active(state, command)
- else inactive(state, command)
- },
- eventHandler = (state, evt) =>
- evt match {
- case Incremented(delta) =>
- if (!state.active) throw new IllegalStateException
- State(state.value + delta, active = true)
- case ValueUpdated(value) =>
- State(value, active = state.active)
- case Activated =>
- if (state.active) throw new IllegalStateException
- state.copy(active = true)
- case Deactivated =>
- if (!state.active) throw new IllegalStateException
- state.copy(active = false)
- })
+ .withEnforcedReplies[Command[_], Event, State](
+ persistenceId,
+ emptyState = State(0, active = true),
+ commandHandler = (state, command) => {
+ if (state.active) active(state, command)
+ else inactive(state, command)
+ },
+ eventHandler = (state, evt) =>
+ evt match {
+ case Incremented(delta) =>
+ if (!state.active) throw new IllegalStateException
+ State(state.value + delta, active = true)
+ case ValueUpdated(value) =>
+ State(value, active = state.active)
+ case Activated =>
+ if (state.active) throw new IllegalStateException
+ state.copy(active = true)
+ case Deactivated =>
+ if (!state.active) throw new IllegalStateException
+ state.copy(active = false)
+ })
.onPersistFailure(SupervisorStrategy
.restartWithBackoff(1.second, maxBackoff = 2.seconds, 0.0)
.withLoggingEnabled(enabled = false))
@@ -488,40 +489,40 @@ class EventSourcedBehaviorStashSpec
"discard when stash has reached limit with default dropped setting" in {
val probe = TestProbe[AnyRef]()
system.toUntyped.eventStream.subscribe(probe.ref.toUntyped, classOf[Dropped])
- val behavior = EventSourcedBehavior[String, String, Boolean](persistenceId = PersistenceId("stash-is-full-drop"),
- emptyState = false,
- commandHandler = { (state, command) =>
- state match {
- case false =>
- command match {
- case "ping" =>
- probe.ref ! "pong"
- Effect.none
- case "start-stashing" =>
- Effect.persist("start-stashing")
- case msg =>
- probe.ref ! msg
- Effect.none
- }
+ val behavior = EventSourcedBehavior[String, String, Boolean](
+ persistenceId = PersistenceId("stash-is-full-drop"),
+ emptyState = false,
+ commandHandler = { (state, command) =>
+ state match {
+ case false =>
+ command match {
+ case "ping" =>
+ probe.ref ! "pong"
+ Effect.none
+ case "start-stashing" =>
+ Effect.persist("start-stashing")
+ case msg =>
+ probe.ref ! msg
+ Effect.none
+ }
- case true =>
- command match {
- case "unstash" =>
- Effect
- .persist("unstash")
- .thenUnstashAll()
- // FIXME #26489: this is run before unstash, so not sequentially as the docs say
- .thenRun(_ =>
- probe.ref ! "done-unstashing")
- case _ =>
- Effect.stash()
- }
- }
- }, {
- case (_, "start-stashing") => true
- case (_, "unstash") => false
- case (_, _) => throw new IllegalArgumentException()
- })
+ case true =>
+ command match {
+ case "unstash" =>
+ Effect
+ .persist("unstash")
+ .thenUnstashAll()
+ // FIXME #26489: this is run before unstash, so not sequentially as the docs say
+ .thenRun(_ => probe.ref ! "done-unstashing")
+ case _ =>
+ Effect.stash()
+ }
+ }
+ }, {
+ case (_, "start-stashing") => true
+ case (_, "unstash") => false
+ case (_, _) => throw new IllegalArgumentException()
+ })
val c = spawn(behavior)
@@ -552,10 +553,11 @@ class EventSourcedBehaviorStashSpec
"fail when stash has reached limit if configured to fail" in {
// persistence settings is system wide, so we need to have a custom testkit/actorsystem here
- val failStashTestKit = ActorTestKit("EventSourcedBehaviorStashSpec-stash-overflow-fail",
- ConfigFactory
- .parseString("akka.persistence.typed.stash-overflow-strategy=fail")
- .withFallback(EventSourcedBehaviorStashSpec.conf))
+ val failStashTestKit = ActorTestKit(
+ "EventSourcedBehaviorStashSpec-stash-overflow-fail",
+ ConfigFactory
+ .parseString("akka.persistence.typed.stash-overflow-strategy=fail")
+ .withFallback(EventSourcedBehaviorStashSpec.conf))
try {
val probe = failStashTestKit.createTestProbe[AnyRef]()
val behavior =
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala
index 4b948f4299..9c72ce9d1d 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala
@@ -25,18 +25,19 @@ class NullEmptyStateSpec extends ScalaTestWithActorTestKit(NullEmptyStateSpec.co
implicit val testSettings = TestKitSettings(system)
def primitiveState(persistenceId: PersistenceId, probe: ActorRef[String]): Behavior[String] =
- EventSourcedBehavior[String, String, String](persistenceId,
- emptyState = null,
- commandHandler = (_, command) => {
- if (command == "stop")
- Effect.stop()
- else
- Effect.persist(command)
- },
- eventHandler = (state, event) => {
- probe.tell("eventHandler:" + state + ":" + event)
- if (state == null) event else state + event
- }).receiveSignal {
+ EventSourcedBehavior[String, String, String](
+ persistenceId,
+ emptyState = null,
+ commandHandler = (_, command) => {
+ if (command == "stop")
+ Effect.stop()
+ else
+ Effect.persist(command)
+ },
+ eventHandler = (state, event) => {
+ probe.tell("eventHandler:" + state + ":" + event)
+ if (state == null) event else state + event
+ }).receiveSignal {
case RecoveryCompleted(s) ⇒
probe.tell("onRecoveryCompleted:" + s)
}
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala
index d2be023b06..790615e3fe 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala
@@ -25,14 +25,15 @@ object OptionalSnapshotStoreSpec {
case class Event(id: Long = System.currentTimeMillis())
def persistentBehavior(probe: TestProbe[State], name: String = UUID.randomUUID().toString) =
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId(name),
- emptyState = State(),
- commandHandler = CommandHandler.command { _ =>
- Effect.persist(Event()).thenRun(probe.ref ! _)
- },
- eventHandler = {
- case (_, _) => State()
- }).snapshotWhen { case _ => true }
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId(name),
+ emptyState = State(),
+ commandHandler = CommandHandler.command { _ =>
+ Effect.persist(Event()).thenRun(probe.ref ! _)
+ },
+ eventHandler = {
+ case (_, _) => State()
+ }).snapshotWhen { case _ => true }
def persistentBehaviorWithSnapshotPlugin(probe: TestProbe[State]) =
persistentBehavior(probe).withSnapshotPluginId("akka.persistence.snapshot-store.local")
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala
index f5d5e0b06c..0806f76dac 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala
@@ -71,18 +71,19 @@ object PerformanceSpec {
Behaviors
.supervise({
val parameters = Parameters()
- EventSourcedBehavior[Command, String, String](persistenceId = PersistenceId(name),
- "",
- commandHandler = CommandHandler.command {
- case StopMeasure ⇒
- Effect.none.thenRun(_ => probe.ref ! StopMeasure)
- case FailAt(sequence) ⇒
- Effect.none.thenRun(_ => parameters.failAt = sequence)
- case command ⇒ other(command, parameters)
- },
- eventHandler = {
- case (state, _) => state
- }).receiveSignal {
+ EventSourcedBehavior[Command, String, String](
+ persistenceId = PersistenceId(name),
+ "",
+ commandHandler = CommandHandler.command {
+ case StopMeasure ⇒
+ Effect.none.thenRun(_ => probe.ref ! StopMeasure)
+ case FailAt(sequence) ⇒
+ Effect.none.thenRun(_ => parameters.failAt = sequence)
+ case command ⇒ other(command, parameters)
+ },
+ eventHandler = {
+ case (state, _) => state
+ }).receiveSignal {
case RecoveryCompleted(_) =>
if (parameters.every(1000)) print("r")
}
@@ -123,10 +124,11 @@ class PerformanceSpec extends ScalaTestWithActorTestKit(ConfigFactory.parseStrin
val loadCycles = system.settings.config.getInt("akka.persistence.performance.cycles.load")
- def stressPersistentActor(persistentActor: ActorRef[Command],
- probe: TestProbe[Reply],
- failAt: Option[Long],
- description: String): Unit = {
+ def stressPersistentActor(
+ persistentActor: ActorRef[Command],
+ probe: TestProbe[Reply],
+ failAt: Option[Long],
+ description: String): Unit = {
failAt.foreach { persistentActor ! FailAt(_) }
val m = new Measure(loadCycles)
m.startMeasure()
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala
index 0cdc7373da..b02fc3baa5 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala
@@ -30,17 +30,18 @@ object PersistentActorCompileOnlyTest {
case class ExampleState(events: List[String] = Nil)
- EventSourcedBehavior[MyCommand, MyEvent, ExampleState](persistenceId = PersistenceId("sample-id-1"),
- emptyState = ExampleState(Nil),
- commandHandler = CommandHandler.command {
- case Cmd(data, sender) =>
- Effect.persist(Evt(data)).thenRun { _ =>
- sender ! Ack
- }
- },
- eventHandler = {
- case (state, Evt(data)) => state.copy(data :: state.events)
- })
+ EventSourcedBehavior[MyCommand, MyEvent, ExampleState](
+ persistenceId = PersistenceId("sample-id-1"),
+ emptyState = ExampleState(Nil),
+ commandHandler = CommandHandler.command {
+ case Cmd(data, sender) =>
+ Effect.persist(Evt(data)).thenRun { _ =>
+ sender ! Ack
+ }
+ },
+ eventHandler = {
+ case (state, Evt(data)) => state.copy(data :: state.events)
+ })
}
object RecoveryComplete {
@@ -73,33 +74,27 @@ object PersistentActorCompileOnlyTest {
val behavior: Behavior[Command] =
Behaviors.setup(
ctx =>
- EventSourcedBehavior[Command, Event, EventsInFlight](persistenceId = PersistenceId("recovery-complete-id"),
- emptyState = EventsInFlight(0, Map.empty),
- commandHandler = (state, cmd) =>
- cmd match {
- case DoSideEffect(data) =>
- Effect
- .persist(
- IntentRecorded(state.nextCorrelationId, data))
- .thenRun { _ =>
- performSideEffect(ctx.self,
- state.nextCorrelationId,
- data)
- }
- case AcknowledgeSideEffect(correlationId) =>
- Effect.persist(
- SideEffectAcknowledged(correlationId))
- },
- eventHandler = (state, evt) =>
- evt match {
- case IntentRecorded(correlationId, data) =>
- EventsInFlight(
- nextCorrelationId = correlationId + 1,
- dataByCorrelationId = state.dataByCorrelationId + (correlationId → data))
- case SideEffectAcknowledged(correlationId) =>
- state.copy(
- dataByCorrelationId = state.dataByCorrelationId - correlationId)
- }).receiveSignal {
+ EventSourcedBehavior[Command, Event, EventsInFlight](
+ persistenceId = PersistenceId("recovery-complete-id"),
+ emptyState = EventsInFlight(0, Map.empty),
+ commandHandler = (state, cmd) =>
+ cmd match {
+ case DoSideEffect(data) =>
+ Effect.persist(IntentRecorded(state.nextCorrelationId, data)).thenRun { _ =>
+ performSideEffect(ctx.self, state.nextCorrelationId, data)
+ }
+ case AcknowledgeSideEffect(correlationId) =>
+ Effect.persist(SideEffectAcknowledged(correlationId))
+ },
+ eventHandler = (state, evt) =>
+ evt match {
+ case IntentRecorded(correlationId, data) =>
+ EventsInFlight(
+ nextCorrelationId = correlationId + 1,
+ dataByCorrelationId = state.dataByCorrelationId + (correlationId → data))
+ case SideEffectAcknowledged(correlationId) =>
+ state.copy(dataByCorrelationId = state.dataByCorrelationId - correlationId)
+ }).receiveSignal {
case RecoveryCompleted(state: EventsInFlight) =>
state.dataByCorrelationId.foreach {
case (correlationId, data) => performSideEffect(ctx.self, correlationId, data)
@@ -164,18 +159,19 @@ object PersistentActorCompileOnlyTest {
case class State(tasksInFlight: List[Task])
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("asdf"),
- emptyState = State(Nil),
- commandHandler = CommandHandler.command {
- case RegisterTask(task) => Effect.persist(TaskRegistered(task))
- case TaskDone(task) => Effect.persist(TaskRemoved(task))
- },
- eventHandler = (state, evt) =>
- evt match {
- case TaskRegistered(task) => State(task :: state.tasksInFlight)
- case TaskRemoved(task) =>
- State(state.tasksInFlight.filter(_ != task))
- }).snapshotWhen { (state, e, seqNr) =>
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("asdf"),
+ emptyState = State(Nil),
+ commandHandler = CommandHandler.command {
+ case RegisterTask(task) => Effect.persist(TaskRegistered(task))
+ case TaskDone(task) => Effect.persist(TaskRemoved(task))
+ },
+ eventHandler = (state, evt) =>
+ evt match {
+ case TaskRegistered(task) => State(task :: state.tasksInFlight)
+ case TaskRemoved(task) =>
+ State(state.tasksInFlight.filter(_ != task))
+ }).snapshotWhen { (state, e, seqNr) =>
state.tasksInFlight.isEmpty
}
}
@@ -196,24 +192,25 @@ object PersistentActorCompileOnlyTest {
val behavior: Behavior[Command] = Behaviors.setup(
ctx =>
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("asdf"),
- emptyState = State(Nil),
- commandHandler = (_, cmd) =>
- cmd match {
- case RegisterTask(task) =>
- Effect.persist(TaskRegistered(task)).thenRun { _ =>
- val child = ctx.spawn[Nothing](worker(task), task)
- // This assumes *any* termination of the child may trigger a `TaskDone`:
- ctx.watchWith(child, TaskDone(task))
- }
- case TaskDone(task) => Effect.persist(TaskRemoved(task))
- },
- eventHandler = (state, evt) =>
- evt match {
- case TaskRegistered(task) => State(task :: state.tasksInFlight)
- case TaskRemoved(task) =>
- State(state.tasksInFlight.filter(_ != task))
- }))
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("asdf"),
+ emptyState = State(Nil),
+ commandHandler = (_, cmd) =>
+ cmd match {
+ case RegisterTask(task) =>
+ Effect.persist(TaskRegistered(task)).thenRun { _ =>
+ val child = ctx.spawn[Nothing](worker(task), task)
+ // This assumes *any* termination of the child may trigger a `TaskDone`:
+ ctx.watchWith(child, TaskDone(task))
+ }
+ case TaskDone(task) => Effect.persist(TaskRemoved(task))
+ },
+ eventHandler = (state, evt) =>
+ evt match {
+ case TaskRegistered(task) => State(task :: state.tasksInFlight)
+ case TaskRemoved(task) =>
+ State(state.tasksInFlight.filter(_ != task))
+ }))
}
@@ -257,40 +254,41 @@ object PersistentActorCompileOnlyTest {
def addItem(id: Id, self: ActorRef[Command]) =
Effect.persist[Event, List[Id]](ItemAdded(id)).thenRun(_ => metadataRegistry ! GetMetaData(id, adapt))
- EventSourcedBehavior[Command, Event, List[Id]](persistenceId = PersistenceId("basket-1"),
- emptyState = Nil,
- commandHandler = { (state, cmd) =>
- if (isFullyHydrated(basket, state))
- cmd match {
- case AddItem(id) => addItem(id, ctx.self)
- case RemoveItem(id) => Effect.persist(ItemRemoved(id))
- case GotMetaData(data) =>
- basket = basket.updatedWith(data)
- Effect.none
- case GetTotalPrice(sender) =>
- sender ! basket.items.map(_.price).sum
- Effect.none
- } else
- cmd match {
- case AddItem(id) => addItem(id, ctx.self)
- case RemoveItem(id) => Effect.persist(ItemRemoved(id))
- case GotMetaData(data) =>
- basket = basket.updatedWith(data)
- if (isFullyHydrated(basket, state)) {
- stash.foreach(ctx.self ! _)
- stash = Nil
- }
- Effect.none
- case cmd: GetTotalPrice =>
- stash :+= cmd
- Effect.none
- }
- },
- eventHandler = (state, evt) =>
- evt match {
- case ItemAdded(id) => id +: state
- case ItemRemoved(id) => state.filter(_ != id)
- }).receiveSignal {
+ EventSourcedBehavior[Command, Event, List[Id]](
+ persistenceId = PersistenceId("basket-1"),
+ emptyState = Nil,
+ commandHandler = { (state, cmd) =>
+ if (isFullyHydrated(basket, state))
+ cmd match {
+ case AddItem(id) => addItem(id, ctx.self)
+ case RemoveItem(id) => Effect.persist(ItemRemoved(id))
+ case GotMetaData(data) =>
+ basket = basket.updatedWith(data)
+ Effect.none
+ case GetTotalPrice(sender) =>
+ sender ! basket.items.map(_.price).sum
+ Effect.none
+ } else
+ cmd match {
+ case AddItem(id) => addItem(id, ctx.self)
+ case RemoveItem(id) => Effect.persist(ItemRemoved(id))
+ case GotMetaData(data) =>
+ basket = basket.updatedWith(data)
+ if (isFullyHydrated(basket, state)) {
+ stash.foreach(ctx.self ! _)
+ stash = Nil
+ }
+ Effect.none
+ case cmd: GetTotalPrice =>
+ stash :+= cmd
+ Effect.none
+ }
+ },
+ eventHandler = (state, evt) =>
+ evt match {
+ case ItemAdded(id) => id +: state
+ case ItemRemoved(id) => state.filter(_ != id)
+ }).receiveSignal {
case RecoveryCompleted(state: List[Id]) =>
state.foreach(id => metadataRegistry ! GetMetaData(id, adapt))
}
@@ -350,10 +348,11 @@ object PersistentActorCompileOnlyTest {
case (state, Remembered(_)) => state
}
- EventSourcedBehavior[Command, Event, Mood](persistenceId = PersistenceId("myPersistenceId"),
- emptyState = Sad,
- commandHandler,
- eventHandler)
+ EventSourcedBehavior[Command, Event, Mood](
+ persistenceId = PersistenceId("myPersistenceId"),
+ emptyState = Sad,
+ commandHandler,
+ eventHandler)
}
@@ -375,10 +374,11 @@ object PersistentActorCompileOnlyTest {
case (state, Done) => state
}
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("myPersistenceId"),
- emptyState = new State,
- commandHandler,
- eventHandler)
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("myPersistenceId"),
+ emptyState = new State,
+ commandHandler,
+ eventHandler)
}
object AndThenPatternMatch {
@@ -386,18 +386,19 @@ object PersistentActorCompileOnlyTest {
class First extends State
class Second extends State
- EventSourcedBehavior[String, String, State](persistenceId = PersistenceId("myPersistenceId"),
- emptyState = new First,
- commandHandler = CommandHandler.command { cmd =>
- Effect.persist(cmd).thenRun {
- case _: First => println("first")
- case _: Second => println("second")
- }
- },
- eventHandler = {
- case (_: First, _) => new Second
- case (state, _) => state
- })
+ EventSourcedBehavior[String, String, State](
+ persistenceId = PersistenceId("myPersistenceId"),
+ emptyState = new First,
+ commandHandler = CommandHandler.command { cmd =>
+ Effect.persist(cmd).thenRun {
+ case _: First => println("first")
+ case _: Second => println("second")
+ }
+ },
+ eventHandler = {
+ case (_: First, _) => new Second
+ case (state, _) => state
+ })
}
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala
index 7ac59fd3b0..f20aaf161a 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala
@@ -25,18 +25,19 @@ class PrimitiveStateSpec extends ScalaTestWithActorTestKit(PrimitiveStateSpec.co
implicit val testSettings = TestKitSettings(system)
def primitiveState(persistenceId: PersistenceId, probe: ActorRef[String]): Behavior[Int] =
- EventSourcedBehavior[Int, Int, Int](persistenceId,
- emptyState = 0,
- commandHandler = (_, command) => {
- if (command < 0)
- Effect.stop()
- else
- Effect.persist(command)
- },
- eventHandler = (state, event) => {
- probe.tell("eventHandler:" + state + ":" + event)
- state + event
- }).receiveSignal {
+ EventSourcedBehavior[Int, Int, Int](
+ persistenceId,
+ emptyState = 0,
+ commandHandler = (_, command) => {
+ if (command < 0)
+ Effect.stop()
+ else
+ Effect.persist(command)
+ },
+ eventHandler = (state, event) => {
+ probe.tell("eventHandler:" + state + ":" + event)
+ state + event
+ }).receiveSignal {
case RecoveryCompleted(n) =>
probe.tell("onRecoveryCompleted:" + n)
}
diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala
index 9ba38a8f5f..f9329438ea 100644
--- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala
+++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala
@@ -73,26 +73,28 @@ object SnapshotMutableStateSpec {
final class MutableState(var value: Int)
- def counter(persistenceId: PersistenceId,
- probe: ActorRef[String]): EventSourcedBehavior[Command, Event, MutableState] = {
- EventSourcedBehavior[Command, Event, MutableState](persistenceId,
- emptyState = new MutableState(0),
- commandHandler = (state, cmd) =>
- cmd match {
- case Increment =>
- Effect.persist(Incremented)
+ def counter(
+ persistenceId: PersistenceId,
+ probe: ActorRef[String]): EventSourcedBehavior[Command, Event, MutableState] = {
+ EventSourcedBehavior[Command, Event, MutableState](
+ persistenceId,
+ emptyState = new MutableState(0),
+ commandHandler = (state, cmd) =>
+ cmd match {
+ case Increment =>
+ Effect.persist(Incremented)
- case GetValue(replyTo) =>
- replyTo ! state.value
- Effect.none
- },
- eventHandler = (state, evt) =>
- evt match {
- case Incremented =>
- state.value += 1
- probe ! s"incremented-${state.value}"
- state
- }).receiveSignal {
+ case GetValue(replyTo) =>
+ replyTo ! state.value
+ Effect.none
+ },
+ eventHandler = (state, evt) =>
+ evt match {
+ case Incremented =>
+ state.value += 1
+ probe ! s"incremented-${state.value}"
+ state
+ }).receiveSignal {
case SnapshotCompleted(meta) =>
probe ! s"snapshot-success-${meta.sequenceNr}"
case SnapshotFailed(meta, _) =>
diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala
index a8d0f73220..2564e5756c 100644
--- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala
+++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala
@@ -95,10 +95,11 @@ object AccountExampleWithEventHandlersInState {
//#withEnforcedReplies
def behavior(accountNumber: String): Behavior[AccountCommand[AccountCommandReply]] = {
- EventSourcedBehavior.withEnforcedReplies(PersistenceId(s"Account|$accountNumber"),
- EmptyAccount,
- commandHandler,
- eventHandler)
+ EventSourcedBehavior.withEnforcedReplies(
+ PersistenceId(s"Account|$accountNumber"),
+ EmptyAccount,
+ commandHandler,
+ eventHandler)
}
//#withEnforcedReplies
diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala
index e1e7d8d666..2a98290b91 100644
--- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala
+++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala
@@ -52,10 +52,11 @@ object BasicPersistentBehaviorCompileOnly {
//#behavior
def behavior(id: String): EventSourcedBehavior[Command, Event, State] =
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId(id),
- emptyState = State(Nil),
- commandHandler = commandHandler,
- eventHandler = eventHandler)
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId(id),
+ emptyState = State(Nil),
+ commandHandler = commandHandler,
+ eventHandler = eventHandler)
//#behavior
}
@@ -66,53 +67,42 @@ object BasicPersistentBehaviorCompileOnly {
final case class State()
val behavior: Behavior[Command] =
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"),
- emptyState = State(),
- commandHandler = (state, cmd) =>
- throw new RuntimeException(
- "TODO: process the command & return an Effect"),
- eventHandler = (state, evt) =>
- throw new RuntimeException(
- "TODO: process the event return the next state"))
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("abc"),
+ emptyState = State(),
+ commandHandler = (state, cmd) => throw new RuntimeException("TODO: process the command & return an Effect"),
+ eventHandler = (state, evt) => throw new RuntimeException("TODO: process the event return the next state"))
//#structure
//#recovery
val recoveryBehavior: Behavior[Command] =
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"),
- emptyState = State(),
- commandHandler = (state, cmd) =>
- throw new RuntimeException(
- "TODO: process the command & return an Effect"),
- eventHandler = (state, evt) =>
- throw new RuntimeException(
- "TODO: process the event return the next state")).receiveSignal {
- case RecoveryCompleted(state) ⇒
- throw new RuntimeException("TODO: add some end-of-recovery side-effect here")
- }
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("abc"),
+ emptyState = State(),
+ commandHandler = (state, cmd) => throw new RuntimeException("TODO: process the command & return an Effect"),
+ eventHandler = (state, evt) => throw new RuntimeException("TODO: process the event return the next state"))
+ .receiveSignal {
+ case RecoveryCompleted(state) ⇒
+ throw new RuntimeException("TODO: add some end-of-recovery side-effect here")
+ }
//#recovery
//#tagging
val taggingBehavior: Behavior[Command] =
- EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"),
- emptyState = State(),
- commandHandler = (state, cmd) =>
- throw new RuntimeException(
- "TODO: process the command & return an Effect"),
- eventHandler = (state, evt) =>
- throw new RuntimeException(
- "TODO: process the event return the next state")).withTagger(_ =>
- Set("tag1", "tag2"))
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("abc"),
+ emptyState = State(),
+ commandHandler = (state, cmd) => throw new RuntimeException("TODO: process the command & return an Effect"),
+ eventHandler = (state, evt) => throw new RuntimeException("TODO: process the event return the next state"))
+ .withTagger(_ => Set("tag1", "tag2"))
//#tagging
//#wrapPersistentBehavior
- val samplePersistentBehavior = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"),
- emptyState = State(),
- commandHandler = (state, cmd) =>
- throw new RuntimeException(
- "TODO: process the command & return an Effect"),
- eventHandler = (state, evt) =>
- throw new RuntimeException(
- "TODO: process the event return the next state"))
+ val samplePersistentBehavior = EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("abc"),
+ emptyState = State(),
+ commandHandler = (state, cmd) => throw new RuntimeException("TODO: process the command & return an Effect"),
+ eventHandler = (state, evt) => throw new RuntimeException("TODO: process the event return the next state"))
.receiveSignal {
case RecoveryCompleted(state) ⇒
throw new RuntimeException("TODO: add some end-of-recovery side-effect here")
@@ -137,42 +127,37 @@ object BasicPersistentBehaviorCompileOnly {
val behaviorWithContext: Behavior[String] =
Behaviors.setup { context =>
- EventSourcedBehavior[String, String, State](persistenceId = PersistenceId("myPersistenceId"),
- emptyState = new State,
- commandHandler = CommandHandler.command { cmd =>
- context.log.info("Got command {}", cmd)
- Effect.persist(cmd).thenRun { state =>
- context.log.info("event persisted, new state {}", state)
- }
- },
- eventHandler = {
- case (state, _) => state
- })
+ EventSourcedBehavior[String, String, State](
+ persistenceId = PersistenceId("myPersistenceId"),
+ emptyState = new State,
+ commandHandler = CommandHandler.command { cmd =>
+ context.log.info("Got command {}", cmd)
+ Effect.persist(cmd).thenRun { state =>
+ context.log.info("event persisted, new state {}", state)
+ }
+ },
+ eventHandler = {
+ case (state, _) => state
+ })
}
// #actor-context
//#snapshottingEveryN
- val snapshottingEveryN = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"),
- emptyState = State(),
- commandHandler = (state, cmd) =>
- throw new RuntimeException(
- "TODO: process the command & return an Effect"),
- eventHandler = (state, evt) =>
- throw new RuntimeException(
- "TODO: process the event return the next state"))
+ val snapshottingEveryN = EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("abc"),
+ emptyState = State(),
+ commandHandler = (state, cmd) => throw new RuntimeException("TODO: process the command & return an Effect"),
+ eventHandler = (state, evt) => throw new RuntimeException("TODO: process the event return the next state"))
.snapshotEvery(100)
//#snapshottingEveryN
final case class BookingCompleted(orderNr: String) extends Event
//#snapshottingPredicate
- val snapshottingPredicate = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"),
- emptyState = State(),
- commandHandler = (state, cmd) =>
- throw new RuntimeException(
- "TODO: process the command & return an Effect"),
- eventHandler = (state, evt) =>
- throw new RuntimeException(
- "TODO: process the event return the next state"))
+ val snapshottingPredicate = EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("abc"),
+ emptyState = State(),
+ commandHandler = (state, cmd) => throw new RuntimeException("TODO: process the command & return an Effect"),
+ eventHandler = (state, evt) => throw new RuntimeException("TODO: process the event return the next state"))
.snapshotWhen {
case (state, BookingCompleted(_), sequenceNumber) => true
case (state, event, sequenceNumber) => false
@@ -182,14 +167,11 @@ object BasicPersistentBehaviorCompileOnly {
//#snapshotSelection
import akka.persistence.SnapshotSelectionCriteria
- val snapshotSelection = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"),
- emptyState = State(),
- commandHandler = (state, cmd) =>
- throw new RuntimeException(
- "TODO: process the command & return an Effect"),
- eventHandler = (state, evt) =>
- throw new RuntimeException(
- "TODO: process the event return the next state"))
+ val snapshotSelection = EventSourcedBehavior[Command, Event, State](
+ persistenceId = PersistenceId("abc"),
+ emptyState = State(),
+ commandHandler = (state, cmd) => throw new RuntimeException("TODO: process the command & return an Effect"),
+ eventHandler = (state, evt) => throw new RuntimeException("TODO: process the event return the next state"))
.withSnapshotSelectionCriteria(SnapshotSelectionCriteria.None)
//#snapshotSelection
diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala
index 8d2bb43e45..797e8e1643 100644
--- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala
+++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala
@@ -52,10 +52,11 @@ object BlogPostExample {
//#behavior
def behavior(entityId: String): Behavior[BlogCommand] =
- EventSourcedBehavior[BlogCommand, BlogEvent, BlogState](persistenceId = PersistenceId(s"Blog-$entityId"),
- emptyState = BlankState,
- commandHandler,
- eventHandler)
+ EventSourcedBehavior[BlogCommand, BlogEvent, BlogState](
+ persistenceId = PersistenceId(s"Blog-$entityId"),
+ emptyState = BlankState,
+ commandHandler,
+ eventHandler)
//#behavior
//#command-handler
diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala
index 76068e7fd3..b3c0fb04f9 100644
--- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala
+++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala
@@ -43,10 +43,11 @@ object MovieWatchList {
}
def behavior(userId: String): Behavior[Command] = {
- EventSourcedBehavior[Command, Event, MovieList](persistenceId = PersistenceId(s"movies-$userId"),
- emptyState = MovieList(Set.empty),
- commandHandler,
- eventHandler = (state, event) => state.applyEvent(event))
+ EventSourcedBehavior[Command, Event, MovieList](
+ persistenceId = PersistenceId(s"movies-$userId"),
+ emptyState = MovieList(Set.empty),
+ commandHandler,
+ eventHandler = (state, event) => state.applyEvent(event))
}
}
diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala
index ecb6237ad5..555932d8a3 100644
--- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala
+++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala
@@ -30,10 +30,11 @@ object StashingExample {
final case class State(taskIdInProgress: Option[String])
def apply(persistenceId: PersistenceId): Behavior[Command] =
- EventSourcedBehavior[Command, Event, State](persistenceId = persistenceId,
- emptyState = State(None),
- commandHandler = (state, command) => onCommand(state, command),
- eventHandler = (state, event) => applyEvent(state, event))
+ EventSourcedBehavior[Command, Event, State](
+ persistenceId = persistenceId,
+ emptyState = State(None),
+ commandHandler = (state, command) => onCommand(state, command),
+ eventHandler = (state, event) => applyEvent(state, event))
.onPersistFailure(SupervisorStrategy.restartWithBackoff(1.second, 30.seconds, 0.2))
private def onCommand(state: State, command: Command): Effect[Event, State] = {
diff --git a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala
index 1aaeeb7671..3fed3aabb9 100644
--- a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala
@@ -24,8 +24,9 @@ object AtLeastOnceDelivery {
* with [[AtLeastOnceDeliveryLike#setDeliverySnapshot]].
*/
@SerialVersionUID(1L)
- case class AtLeastOnceDeliverySnapshot(currentDeliveryId: Long,
- unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery])
+ case class AtLeastOnceDeliverySnapshot(
+ currentDeliveryId: Long,
+ unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery])
extends Message {
/**
@@ -282,10 +283,11 @@ trait AtLeastOnceDeliveryLike extends Eventsourced {
@InternalApi
private[akka] final def internalDeliver(destination: ActorSelection)(deliveryIdToMessage: Long => Any): Unit = {
val isWildcardSelection = destination.pathString.contains("*")
- require(!isWildcardSelection,
- "Delivering to wildcard actor selections is not supported by AtLeastOnceDelivery. " +
- "Introduce an mediator Actor which this AtLeastOnceDelivery Actor will deliver the messages to," +
- "and will handle the logic of fan-out and collecting individual confirmations, until it can signal confirmation back to this Actor.")
+ require(
+ !isWildcardSelection,
+ "Delivering to wildcard actor selections is not supported by AtLeastOnceDelivery. " +
+ "Introduce an mediator Actor which this AtLeastOnceDelivery Actor will deliver the messages to," +
+ "and will handle the logic of fan-out and collecting individual confirmations, until it can signal confirmation back to this Actor.")
internalDeliver(ActorPath.fromString(destination.toSerializationFormat))(deliveryIdToMessage)
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala
index 5f5d502b6d..19d20d119a 100644
--- a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala
@@ -153,18 +153,20 @@ private[persistence] trait Eventsourced
protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit =
event match {
case Some(evt) =>
- log.error(cause,
- "Exception in receiveRecover when replaying event type [{}] with sequence number [{}] for " +
- "persistenceId [{}].",
- evt.getClass.getName,
- lastSequenceNr,
- persistenceId)
+ log.error(
+ cause,
+ "Exception in receiveRecover when replaying event type [{}] with sequence number [{}] for " +
+ "persistenceId [{}].",
+ evt.getClass.getName,
+ lastSequenceNr,
+ persistenceId)
case None =>
- log.error(cause,
- "Persistence failure when replaying events for persistenceId [{}]. " +
- "Last known sequence number [{}]",
- persistenceId,
- lastSequenceNr)
+ log.error(
+ cause,
+ "Persistence failure when replaying events for persistenceId [{}]. " +
+ "Last known sequence number [{}]",
+ persistenceId,
+ lastSequenceNr)
}
/**
@@ -181,11 +183,12 @@ private[persistence] trait Eventsourced
* @param event the event that was to be persisted
*/
protected def onPersistFailure(cause: Throwable, event: Any, seqNr: Long): Unit = {
- log.error(cause,
- "Failed to persist event type [{}] with sequence number [{}] for persistenceId [{}].",
- event.getClass.getName,
- seqNr,
- persistenceId)
+ log.error(
+ cause,
+ "Failed to persist event type [{}] with sequence number [{}] for persistenceId [{}].",
+ event.getClass.getName,
+ seqNr,
+ persistenceId)
}
/**
@@ -197,12 +200,13 @@ private[persistence] trait Eventsourced
* @param event the event that was to be persisted
*/
protected def onPersistRejected(cause: Throwable, event: Any, seqNr: Long): Unit = {
- log.error(cause,
- "Rejected to persist event type [{}] with sequence number [{}] for persistenceId [{}] due to [{}].",
- event.getClass.getName,
- seqNr,
- persistenceId,
- cause.getMessage)
+ log.error(
+ cause,
+ "Rejected to persist event type [{}] with sequence number [{}] for persistenceId [{}] due to [{}].",
+ event.getClass.getName,
+ seqNr,
+ persistenceId,
+ cause.getMessage)
}
private def stashInternally(currMsg: Any): Unit =
@@ -296,26 +300,30 @@ private[persistence] trait Eventsourced
message match {
case RecoveryCompleted => // mute
case SaveSnapshotFailure(m, e) =>
- log.warning("Failed to saveSnapshot given metadata [{}] due to: [{}: {}]",
- m,
- e.getClass.getCanonicalName,
- e.getMessage)
+ log.warning(
+ "Failed to saveSnapshot given metadata [{}] due to: [{}: {}]",
+ m,
+ e.getClass.getCanonicalName,
+ e.getMessage)
case DeleteSnapshotFailure(m, e) =>
- log.warning("Failed to deleteSnapshot given metadata [{}] due to: [{}: {}]",
- m,
- e.getClass.getCanonicalName,
- e.getMessage)
+ log.warning(
+ "Failed to deleteSnapshot given metadata [{}] due to: [{}: {}]",
+ m,
+ e.getClass.getCanonicalName,
+ e.getMessage)
case DeleteSnapshotsFailure(c, e) =>
- log.warning("Failed to deleteSnapshots given criteria [{}] due to: [{}: {}]",
- c,
- e.getClass.getCanonicalName,
- e.getMessage)
+ log.warning(
+ "Failed to deleteSnapshots given criteria [{}] due to: [{}: {}]",
+ c,
+ e.getClass.getCanonicalName,
+ e.getMessage)
case DeleteMessagesFailure(e, toSequenceNr) =>
- log.warning("Failed to deleteMessages toSequenceNr [{}] for persistenceId [{}] due to [{}: {}].",
- toSequenceNr,
- persistenceId,
- e.getClass.getCanonicalName,
- e.getMessage)
+ log.warning(
+ "Failed to deleteMessages toSequenceNr [{}] for persistenceId [{}] due to [{}: {}].",
+ toSequenceNr,
+ persistenceId,
+ e.getClass.getCanonicalName,
+ e.getMessage)
case m => super.unhandled(m)
}
}
@@ -378,11 +386,12 @@ private[persistence] trait Eventsourced
pendingStashingPersistInvocations += 1
pendingInvocations.addLast(StashingHandlerInvocation(event, handler.asInstanceOf[Any => Unit]))
eventBatch ::= AtomicWrite(
- PersistentRepr(event,
- persistenceId = persistenceId,
- sequenceNr = nextSequenceNr(),
- writerUuid = writerUuid,
- sender = sender()))
+ PersistentRepr(
+ event,
+ persistenceId = persistenceId,
+ sequenceNr = nextSequenceNr(),
+ writerUuid = writerUuid,
+ sender = sender()))
}
/**
@@ -400,11 +409,12 @@ private[persistence] trait Eventsourced
}
eventBatch ::= AtomicWrite(
events.map(
- PersistentRepr.apply(_,
- persistenceId = persistenceId,
- sequenceNr = nextSequenceNr(),
- writerUuid = writerUuid,
- sender = sender())))
+ PersistentRepr.apply(
+ _,
+ persistenceId = persistenceId,
+ sequenceNr = nextSequenceNr(),
+ writerUuid = writerUuid,
+ sender = sender())))
}
}
@@ -418,11 +428,12 @@ private[persistence] trait Eventsourced
"Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later.")
pendingInvocations.addLast(AsyncHandlerInvocation(event, handler.asInstanceOf[Any => Unit]))
eventBatch ::= AtomicWrite(
- PersistentRepr(event,
- persistenceId = persistenceId,
- sequenceNr = nextSequenceNr(),
- writerUuid = writerUuid,
- sender = sender()))
+ PersistentRepr(
+ event,
+ persistenceId = persistenceId,
+ sequenceNr = nextSequenceNr(),
+ writerUuid = writerUuid,
+ sender = sender()))
}
/**
@@ -439,11 +450,12 @@ private[persistence] trait Eventsourced
}
eventBatch ::= AtomicWrite(
events.map(
- PersistentRepr(_,
- persistenceId = persistenceId,
- sequenceNr = nextSequenceNr(),
- writerUuid = writerUuid,
- sender = sender())))
+ PersistentRepr(
+ _,
+ persistenceId = persistenceId,
+ sequenceNr = nextSequenceNr(),
+ writerUuid = writerUuid,
+ sender = sender())))
}
}
@@ -513,9 +525,10 @@ private[persistence] trait Eventsourced
* Or delete all by using `Long.MaxValue` as the `toSequenceNr`
* {{{ m.copy(sequenceNr = Long.MaxValue) }}}
*/
- @InternalApi private[akka] def internalDeleteMessagesBeforeSnapshot(e: SaveSnapshotSuccess,
- keepNrOfBatches: Int,
- snapshotAfter: Int): Unit = {
+ @InternalApi private[akka] def internalDeleteMessagesBeforeSnapshot(
+ e: SaveSnapshotSuccess,
+ keepNrOfBatches: Int,
+ snapshotAfter: Int): Unit = {
/* Delete old events but keep the latest around
1. It's not safe to delete all events immediately because snapshots are typically stored with
a weaker consistency level. A replay might "see" the deleted events before it sees the stored
@@ -648,8 +661,9 @@ private[persistence] trait Eventsourced
returnRecoveryPermit()
case RecoveryTick(true) =>
- try onRecoveryFailure(new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"),
- event = None)
+ try onRecoveryFailure(
+ new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"),
+ event = None)
finally context.stop(self)
returnRecoveryPermit()
diff --git a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala
index 21a01b9317..1f3cef3b4a 100644
--- a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala
@@ -37,9 +37,10 @@ private[persistence] object JournalProtocol {
* @param messages messages to be written.
* @param persistentActor write requestor.
*/
- final case class WriteMessages(messages: immutable.Seq[PersistentEnvelope],
- persistentActor: ActorRef,
- actorInstanceId: Int)
+ final case class WriteMessages(
+ messages: immutable.Seq[PersistentEnvelope],
+ persistentActor: ActorRef,
+ actorInstanceId: Int)
extends Request
with NoSerializationVerificationNeeded
@@ -106,11 +107,12 @@ private[persistence] object JournalProtocol {
* @param persistenceId requesting persistent actor id.
* @param persistentActor requesting persistent actor.
*/
- final case class ReplayMessages(fromSequenceNr: Long,
- toSequenceNr: Long,
- max: Long,
- persistenceId: String,
- persistentActor: ActorRef)
+ final case class ReplayMessages(
+ fromSequenceNr: Long,
+ toSequenceNr: Long,
+ max: Long,
+ persistenceId: String,
+ persistentActor: ActorRef)
extends Request
/**
diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala
index 74c6bbff70..ca5fa27e5a 100644
--- a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala
@@ -219,8 +219,9 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
// Lazy, so user is not forced to configure defaults when she is not using them.
lazy val defaultInternalStashOverflowStrategy: StashOverflowStrategy =
system.dynamicAccess
- .createInstanceFor[StashOverflowStrategyConfigurator](config.getString("internal-stash-overflow-strategy"),
- EmptyImmutableSeq)
+ .createInstanceFor[StashOverflowStrategyConfigurator](
+ config.getString("internal-stash-overflow-strategy"),
+ EmptyImmutableSeq)
.map(_.create(system.settings.config))
.get
@@ -289,8 +290,9 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
* When empty, looks in `akka.persistence.journal.plugin` to find configuration entry path.
* When configured, uses `journalPluginId` as absolute path to the journal configuration entry.
*/
- private[akka] final def journalConfigFor(journalPluginId: String,
- journalPluginConfig: Config = ConfigFactory.empty): Config = {
+ private[akka] final def journalConfigFor(
+ journalPluginId: String,
+ journalPluginConfig: Config = ConfigFactory.empty): Config = {
val configPath = if (isEmpty(journalPluginId)) defaultJournalPluginId else journalPluginId
pluginHolderFor(configPath, JournalFallbackConfigPath, journalPluginConfig).config
}
@@ -314,8 +316,9 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
* When configured, uses `journalPluginId` as absolute path to the journal configuration entry.
* Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`.
*/
- private[akka] final def journalFor(journalPluginId: String,
- journalPluginConfig: Config = ConfigFactory.empty): ActorRef = {
+ private[akka] final def journalFor(
+ journalPluginId: String,
+ journalPluginConfig: Config = ConfigFactory.empty): ActorRef = {
val configPath = if (isEmpty(journalPluginId)) defaultJournalPluginId else journalPluginId
pluginHolderFor(configPath, JournalFallbackConfigPath, journalPluginConfig).actor
}
@@ -328,15 +331,17 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
* When configured, uses `snapshotPluginId` as absolute path to the snapshot store configuration entry.
* Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`.
*/
- private[akka] final def snapshotStoreFor(snapshotPluginId: String,
- snapshotPluginConfig: Config = ConfigFactory.empty): ActorRef = {
+ private[akka] final def snapshotStoreFor(
+ snapshotPluginId: String,
+ snapshotPluginConfig: Config = ConfigFactory.empty): ActorRef = {
val configPath = if (isEmpty(snapshotPluginId)) defaultSnapshotPluginId else snapshotPluginId
pluginHolderFor(configPath, SnapshotStoreFallbackConfigPath, snapshotPluginConfig).actor
}
- @tailrec private def pluginHolderFor(configPath: String,
- fallbackPath: String,
- additionalConfig: Config): PluginHolder = {
+ @tailrec private def pluginHolderFor(
+ configPath: String,
+ fallbackPath: String,
+ additionalConfig: Config): PluginHolder = {
val extensionIdMap = pluginExtensionId.get
extensionIdMap.get(configPath) match {
case Some(extensionId) =>
@@ -392,8 +397,9 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
override def createExtension(system: ExtendedActorSystem): PluginHolder = {
val mergedConfig = additionalConfig.withFallback(system.settings.config)
- require(!isEmpty(configPath) && mergedConfig.hasPath(configPath),
- s"'reference.conf' is missing persistence plugin config path: '$configPath'")
+ require(
+ !isEmpty(configPath) && mergedConfig.hasPath(configPath),
+ s"'reference.conf' is missing persistence plugin config path: '$configPath'")
val config: Config = mergedConfig.getConfig(configPath).withFallback(mergedConfig.getConfig(fallbackPath))
val plugin: ActorRef = createPlugin(configPath, config)
val adapters: EventAdapters = createAdapters(configPath, mergedConfig)
diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala b/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala
index 46edd3203a..380800f03b 100644
--- a/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala
@@ -21,8 +21,9 @@ import scala.util.Failure
*/
@InternalApi
private[akka] object PersistencePlugin {
- final private[persistence] case class PluginHolder[ScalaDsl, JavaDsl](scaladslPlugin: ScalaDsl,
- javadslPlugin: JavaDsl)
+ final private[persistence] case class PluginHolder[ScalaDsl, JavaDsl](
+ scaladslPlugin: ScalaDsl,
+ javadslPlugin: JavaDsl)
extends Extension
}
@@ -66,8 +67,9 @@ private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](s
private def createPlugin(configPath: String, readJournalPluginConfig: Config): T = {
val mergedConfig = readJournalPluginConfig.withFallback(system.settings.config)
- require(!isEmpty(configPath) && mergedConfig.hasPath(configPath),
- s"'reference.conf' is missing persistence plugin config path: '$configPath'")
+ require(
+ !isEmpty(configPath) && mergedConfig.hasPath(configPath),
+ s"'reference.conf' is missing persistence plugin config path: '$configPath'")
val pluginConfig = mergedConfig.getConfig(configPath)
val pluginClassName = pluginConfig.getString("class")
log.debug(s"Create plugin: $configPath $pluginClassName")
@@ -88,9 +90,10 @@ private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](s
.recoverWith {
case ex: Exception =>
Failure.apply(
- new IllegalArgumentException("Unable to create read journal plugin instance for path " +
- s"[$configPath], class [$pluginClassName]!",
- ex))
+ new IllegalArgumentException(
+ "Unable to create read journal plugin instance for path " +
+ s"[$configPath], class [$pluginClassName]!",
+ ex))
}
.get
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala
index d8f8f4eaf2..079d37bf1b 100644
--- a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala
@@ -120,11 +120,12 @@ trait PersistentRepr extends Message {
/**
* Creates a new copy of this [[PersistentRepr]].
*/
- def update(sequenceNr: Long = sequenceNr,
- persistenceId: String = persistenceId,
- deleted: Boolean = deleted,
- sender: ActorRef = sender,
- writerUuid: String = writerUuid): PersistentRepr
+ def update(
+ sequenceNr: Long = sequenceNr,
+ persistenceId: String = persistenceId,
+ deleted: Boolean = deleted,
+ sender: ActorRef = sender,
+ writerUuid: String = writerUuid): PersistentRepr
}
object PersistentRepr {
@@ -138,13 +139,14 @@ object PersistentRepr {
/**
* Plugin API.
*/
- def apply(payload: Any,
- sequenceNr: Long = 0L,
- persistenceId: String = PersistentRepr.Undefined,
- manifest: String = PersistentRepr.Undefined,
- deleted: Boolean = false,
- sender: ActorRef = null,
- writerUuid: String = PersistentRepr.Undefined): PersistentRepr =
+ def apply(
+ payload: Any,
+ sequenceNr: Long = 0L,
+ persistenceId: String = PersistentRepr.Undefined,
+ manifest: String = PersistentRepr.Undefined,
+ deleted: Boolean = false,
+ sender: ActorRef = null,
+ writerUuid: String = PersistentRepr.Undefined): PersistentRepr =
PersistentImpl(payload, sequenceNr, persistenceId, manifest, deleted, sender, writerUuid)
/**
@@ -162,13 +164,14 @@ object PersistentRepr {
/**
* INTERNAL API.
*/
-private[persistence] final case class PersistentImpl(override val payload: Any,
- override val sequenceNr: Long,
- override val persistenceId: String,
- override val manifest: String,
- override val deleted: Boolean,
- override val sender: ActorRef,
- override val writerUuid: String)
+private[persistence] final case class PersistentImpl(
+ override val payload: Any,
+ override val sequenceNr: Long,
+ override val persistenceId: String,
+ override val manifest: String,
+ override val deleted: Boolean,
+ override val sender: ActorRef,
+ override val writerUuid: String)
extends PersistentRepr
with NoSerializationVerificationNeeded {
@@ -180,10 +183,11 @@ private[persistence] final case class PersistentImpl(override val payload: Any,
else copy(manifest = manifest)
def update(sequenceNr: Long, persistenceId: String, deleted: Boolean, sender: ActorRef, writerUuid: String) =
- copy(sequenceNr = sequenceNr,
- persistenceId = persistenceId,
- deleted = deleted,
- sender = sender,
- writerUuid = writerUuid)
+ copy(
+ sequenceNr = sequenceNr,
+ persistenceId = persistenceId,
+ deleted = deleted,
+ sender = sender,
+ writerUuid = writerUuid)
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala
index e8a980a4ad..a4dc89339d 100644
--- a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala
@@ -58,9 +58,10 @@ final case class DeleteMessagesFailure(cause: Throwable, toSequenceNr: Long)
* @param replayMax maximum number of messages to replay. Default is no limit.
*/
@SerialVersionUID(1L)
-final case class Recovery(fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest,
- toSequenceNr: Long = Long.MaxValue,
- replayMax: Long = Long.MaxValue)
+final case class Recovery(
+ fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest,
+ toSequenceNr: Long = Long.MaxValue,
+ replayMax: Long = Long.MaxValue)
object Recovery {
diff --git a/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala b/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala
index a1c6173807..4d4fecd745 100644
--- a/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala
@@ -70,9 +70,10 @@ import akka.actor.Terminated
recoveryPermitGranted(ref)
}
if (pending.isEmpty && maxPendingStats > 0) {
- log.debug("Drained pending recovery permit requests, max in progress was [{}], still [{}] in progress",
- usedPermits + maxPendingStats,
- usedPermits)
+ log.debug(
+ "Drained pending recovery permit requests, max in progress was [{}], still [{}] in progress",
+ usedPermits + maxPendingStats,
+ usedPermits)
maxPendingStats = 0
}
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala
index 17ae2c10ab..2ee4e6bc8d 100644
--- a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala
@@ -99,10 +99,11 @@ final case class SnapshotOffer(metadata: SnapshotMetadata, snapshot: Any)
* @see [[Recovery]]
*/
@SerialVersionUID(1L)
-final case class SnapshotSelectionCriteria(maxSequenceNr: Long = Long.MaxValue,
- maxTimestamp: Long = Long.MaxValue,
- minSequenceNr: Long = 0L,
- minTimestamp: Long = 0L) {
+final case class SnapshotSelectionCriteria(
+ maxSequenceNr: Long = Long.MaxValue,
+ maxTimestamp: Long = Long.MaxValue,
+ minSequenceNr: Long = 0L,
+ minTimestamp: Long = 0L) {
/**
* INTERNAL API.
diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala
index aedae4a8c5..812472d21d 100644
--- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala
@@ -207,9 +207,10 @@ object PersistentFSM {
* @tparam D state data type
*/
@InternalApi
- private[persistence] case class PersistentFSMSnapshot[D](stateIdentifier: String,
- data: D,
- timeout: Option[FiniteDuration])
+ private[persistence] case class PersistentFSMSnapshot[D](
+ stateIdentifier: String,
+ data: D,
+ timeout: Option[FiniteDuration])
extends Message
/**
@@ -337,27 +338,29 @@ object PersistentFSM {
* accumulated while processing the last message, possibly domain event and handler
* to be executed after FSM moves to the new state (also triggered when staying in the same state)
*/
- final case class State[S, D, E](stateName: S,
- stateData: D,
- timeout: Option[FiniteDuration] = None,
- stopReason: Option[Reason] = None,
- replies: List[Any] = Nil,
- domainEvents: Seq[E] = Nil,
- afterTransitionDo: D => Unit = { _: D =>
- })(private[akka] val notifies: Boolean = true) {
+ final case class State[S, D, E](
+ stateName: S,
+ stateData: D,
+ timeout: Option[FiniteDuration] = None,
+ stopReason: Option[Reason] = None,
+ replies: List[Any] = Nil,
+ domainEvents: Seq[E] = Nil,
+ afterTransitionDo: D => Unit = { _: D =>
+ })(private[akka] val notifies: Boolean = true) {
/**
* Copy object and update values if needed.
*/
@InternalApi
- private[akka] def copy(stateName: S = stateName,
- stateData: D = stateData,
- timeout: Option[FiniteDuration] = timeout,
- stopReason: Option[Reason] = stopReason,
- replies: List[Any] = replies,
- notifies: Boolean = notifies,
- domainEvents: Seq[E] = domainEvents,
- afterTransitionDo: D => Unit = afterTransitionDo): State[S, D, E] = {
+ private[akka] def copy(
+ stateName: S = stateName,
+ stateData: D = stateData,
+ timeout: Option[FiniteDuration] = timeout,
+ stopReason: Option[Reason] = stopReason,
+ replies: List[Any] = replies,
+ notifies: Boolean = notifies,
+ domainEvents: Seq[E] = domainEvents,
+ afterTransitionDo: D => Unit = afterTransitionDo): State[S, D, E] = {
State(stateName, stateData, timeout, stopReason, replies, domainEvents, afterTransitionDo)(notifies)
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala
index 2e539b6392..f7d49bd461 100644
--- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala
@@ -706,9 +706,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param stateTimeout default state timeout for this state
* @param stateFunctionBuilder partial function builder describing response to input
*/
- final def when(stateName: S,
- stateTimeout: FiniteDuration,
- stateFunctionBuilder: FSMStateFunctionBuilder[S, D, E]): Unit =
+ final def when(
+ stateName: S,
+ stateTimeout: FiniteDuration,
+ stateFunctionBuilder: FSMStateFunctionBuilder[S, D, E]): Unit =
when(stateName, stateTimeout)(stateFunctionBuilder.build())
/**
@@ -781,10 +782,11 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[ET, DT <: D](eventType: Class[ET],
- dataType: Class[DT],
- predicate: TypedPredicate2[ET, DT],
- apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] =
+ final def matchEvent[ET, DT <: D](
+ eventType: Class[ET],
+ dataType: Class[DT],
+ predicate: TypedPredicate2[ET, DT],
+ apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] =
new FSMStateFunctionBuilder[S, D, E]().event(eventType, dataType, predicate, apply)
/**
@@ -797,9 +799,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[ET, DT <: D](eventType: Class[ET],
- dataType: Class[DT],
- apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] =
+ final def matchEvent[ET, DT <: D](
+ eventType: Class[ET],
+ dataType: Class[DT],
+ apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] =
new FSMStateFunctionBuilder[S, D, E]().event(eventType, dataType, apply)
/**
@@ -812,9 +815,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[ET](eventType: Class[ET],
- predicate: TypedPredicate2[ET, D],
- apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D, E] =
+ final def matchEvent[ET](
+ eventType: Class[ET],
+ predicate: TypedPredicate2[ET, D],
+ apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D, E] =
new FSMStateFunctionBuilder[S, D, E]().event(eventType, predicate, apply)
/**
@@ -838,8 +842,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent(predicate: TypedPredicate2[AnyRef, D],
- apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D, E] =
+ final def matchEvent(
+ predicate: TypedPredicate2[AnyRef, D],
+ apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D, E] =
new FSMStateFunctionBuilder[S, D, E]().event(predicate, apply)
/**
@@ -853,9 +858,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEvent[DT <: D](eventMatches: JList[AnyRef],
- dataType: Class[DT],
- apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D, E] =
+ final def matchEvent[DT <: D](
+ eventMatches: JList[AnyRef],
+ dataType: Class[DT],
+ apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D, E] =
new FSMStateFunctionBuilder[S, D, E]().event(eventMatches, dataType, apply)
/**
@@ -881,9 +887,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param apply an action to apply to the event and state data if there is a match
* @return the builder with the case statement added
*/
- final def matchEventEquals[Ev, DT <: D](event: Ev,
- dataType: Class[DT],
- apply: Apply2[Ev, DT, State]): FSMStateFunctionBuilder[S, D, E] =
+ final def matchEventEquals[Ev, DT <: D](
+ event: Ev,
+ dataType: Class[DT],
+ apply: Apply2[Ev, DT, State]): FSMStateFunctionBuilder[S, D, E] =
new FSMStateFunctionBuilder[S, D, E]().eventEquals(event, dataType, apply)
/**
@@ -969,9 +976,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param predicate a predicate that will be evaluated on the reason if the type matches
* @return the builder with the case statement added
*/
- final def matchStop[RT <: Reason](reasonType: Class[RT],
- predicate: TypedPredicate[RT],
- apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] =
+ final def matchStop[RT <: Reason](
+ reasonType: Class[RT],
+ predicate: TypedPredicate[RT],
+ apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] =
new FSMStopBuilder[S, D]().stop(reasonType, predicate, apply)
/**
@@ -992,9 +1000,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param apply an action to apply to the argument if the type and predicate matches
* @return a builder with the case statement added
*/
- final def matchData[DT <: D](dataType: Class[DT],
- predicate: TypedPredicate[DT],
- apply: UnitApply[DT]): UnitPFBuilder[D] =
+ final def matchData[DT <: D](
+ dataType: Class[DT],
+ predicate: TypedPredicate[DT],
+ apply: UnitApply[DT]): UnitPFBuilder[D] =
UnitMatch.`match`(dataType, predicate, apply)
/**
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala
index 98b5494985..dd8e9f496b 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala
@@ -98,10 +98,11 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery {
}
case Failure(e) =>
a.payload.foreach { p =>
- resequencer ! Desequenced(WriteMessageRejected(p, e, actorInstanceId),
- n,
- persistentActor,
- p.sender)
+ resequencer ! Desequenced(
+ WriteMessageRejected(p, e, actorInstanceId),
+ n,
+ persistentActor,
+ p.sender)
n += 1
}
}
@@ -130,11 +131,12 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery {
val replyTo =
if (isReplayFilterEnabled)
context.actorOf(
- ReplayFilter.props(persistentActor,
- replayFilterMode,
- replayFilterWindowSize,
- replayFilterMaxOldWriters,
- replayDebugEnabled))
+ ReplayFilter.props(
+ persistentActor,
+ replayFilterMode,
+ replayFilterWindowSize,
+ replayFilterMaxOldWriters,
+ replayDebugEnabled))
else persistentActor
val readHighestSequenceNrFrom = math.max(0L, fromSequenceNr - 1)
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala
index c296aed312..2358715c27 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala
@@ -128,9 +128,10 @@ private[persistence] object AsyncWriteTarget {
@SerialVersionUID(1L)
class AsyncReplayTimeoutException(msg: String) extends AkkaException(msg)
-private class ReplayMediator(replayCallback: PersistentRepr => Unit,
- replayCompletionPromise: Promise[Unit],
- replayTimeout: Duration)
+private class ReplayMediator(
+ replayCallback: PersistentRepr => Unit,
+ replayCompletionPromise: Promise[Unit],
+ replayTimeout: Duration)
extends Actor {
import AsyncWriteTarget._
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala
index 3edeef810f..4b70f879a9 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala
@@ -20,9 +20,10 @@ import scala.util.Try
/**
* `EventAdapters` serves as a per-journal collection of bound event adapters.
*/
-class EventAdapters(map: ConcurrentHashMap[Class[_], EventAdapter],
- bindings: immutable.Seq[(Class[_], EventAdapter)],
- log: LoggingAdapter) {
+class EventAdapters(
+ map: ConcurrentHashMap[Class[_], EventAdapter],
+ bindings: immutable.Seq[(Class[_], EventAdapter)],
+ log: LoggingAdapter) {
/**
* Finds the "most specific" matching adapter for the given class (i.e. it may return an adapter that can work on a
@@ -70,17 +71,19 @@ private[akka] object EventAdapters {
apply(system, adapters, adapterBindings)
}
- private def apply(system: ExtendedActorSystem,
- adapters: Map[Name, FQN],
- adapterBindings: Map[FQN, BoundAdapters]): EventAdapters = {
+ private def apply(
+ system: ExtendedActorSystem,
+ adapters: Map[Name, FQN],
+ adapterBindings: Map[FQN, BoundAdapters]): EventAdapters = {
val adapterNames = adapters.keys.toSet
for {
(fqn, boundToAdapters) <- adapterBindings
boundAdapter <- boundToAdapters
- } require(adapterNames(boundAdapter.toString),
- s"$fqn was bound to undefined event-adapter: $boundAdapter (bindings: ${boundToAdapters
- .mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})")
+ } require(
+ adapterNames(boundAdapter.toString),
+ s"$fqn was bound to undefined event-adapter: $boundAdapter (bindings: ${boundToAdapters
+ .mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})")
// A Map of handler from alias to implementation (i.e. class implementing akka.serialization.Serializer)
// For example this defines a handler named 'country': `"country" -> com.example.comain.CountryTagsAdapter`
@@ -93,8 +96,9 @@ private[akka] object EventAdapters {
yield
if (as.size == 1) (system.dynamicAccess.getClassFor[Any](k).get, handlers(as.head))
else
- (system.dynamicAccess.getClassFor[Any](k).get,
- NoopWriteEventAdapter(CombinedReadEventAdapter(as.map(handlers))))
+ (
+ system.dynamicAccess.getClassFor[Any](k).get,
+ NoopWriteEventAdapter(CombinedReadEventAdapter(as.map(handlers))))
sort(bs)
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala
index a95920cd4f..a7eeca4ce3 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala
@@ -19,11 +19,12 @@ import scala.collection.mutable.LinkedHashSet
* sequenceNr in the replayed events to find events emitted by overlapping writers.
*/
private[akka] object ReplayFilter {
- def props(persistentActor: ActorRef,
- mode: Mode,
- windowSize: Int,
- maxOldWriters: Int,
- debugEnabled: Boolean): Props = {
+ def props(
+ persistentActor: ActorRef,
+ mode: Mode,
+ windowSize: Int,
+ maxOldWriters: Int,
+ debugEnabled: Boolean): Props = {
require(windowSize > 0, "windowSize must be > 0")
require(maxOldWriters > 0, "maxOldWriters must be > 0")
require(mode != Disabled, "mode must not be Disabled")
@@ -44,11 +45,12 @@ private[akka] object ReplayFilter {
/**
* INTERNAL API
*/
-private[akka] class ReplayFilter(persistentActor: ActorRef,
- mode: ReplayFilter.Mode,
- windowSize: Int,
- maxOldWriters: Int,
- debugEnabled: Boolean)
+private[akka] class ReplayFilter(
+ persistentActor: ActorRef,
+ mode: ReplayFilter.Mode,
+ windowSize: Int,
+ maxOldWriters: Int,
+ debugEnabled: Boolean)
extends Actor
with ActorLogging {
import JournalProtocol._
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala
index 6ba94ac119..cdcad180d0 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala
@@ -108,11 +108,12 @@ private[persistence] object LeveldbJournal {
* `fromSequenceNr` is exclusive
* `toSequenceNr` is inclusive
*/
- final case class ReplayTaggedMessages(fromSequenceNr: Long,
- toSequenceNr: Long,
- max: Long,
- tag: String,
- replyTo: ActorRef)
+ final case class ReplayTaggedMessages(
+ fromSequenceNr: Long,
+ toSequenceNr: Long,
+ max: Long,
+ tag: String,
+ replyTo: ActorRef)
extends SubscriptionCommand
final case class ReplayedTaggedMessage(persistent: PersistentRepr, tag: String, offset: Long)
extends DeadLetterSuppression
@@ -134,9 +135,10 @@ private[persistence] class SharedLeveldbJournal extends AsyncWriteProxy {
store match {
case Some(s) => s.forward(cmd)
case None =>
- log.error("Failed {} request. " +
- "Store not initialized. Use `SharedLeveldbJournal.setStore(sharedStore, system)`",
- cmd)
+ log.error(
+ "Failed {} request. " +
+ "Store not initialized. Use `SharedLeveldbJournal.setStore(sharedStore, system)`",
+ cmd)
}
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala
index 86761a2607..c67c33e5cf 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala
@@ -87,8 +87,9 @@ private[persistence] trait LeveldbStore
if (tags.nonEmpty && hasTagSubscribers)
allTags = allTags.union(tags)
- require(!p2.persistenceId.startsWith(tagPersistenceIdPrefix),
- s"persistenceId [${p.persistenceId}] must not start with $tagPersistenceIdPrefix")
+ require(
+ !p2.persistenceId.startsWith(tagPersistenceIdPrefix),
+ s"persistenceId [${p.persistenceId}] must not start with $tagPersistenceIdPrefix")
addToMessageBatch(p2, tags, batch)
}
if (hasPersistenceIdSubscribers)
diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala
index 93ad137c01..2e3581223d 100644
--- a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala
@@ -115,30 +115,33 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer
import scala.collection.JavaConverters._
val unconfirmedDeliveries = new VectorBuilder[UnconfirmedDelivery]()
atLeastOnceDeliverySnapshot.getUnconfirmedDeliveriesList().iterator().asScala.foreach { next =>
- unconfirmedDeliveries += UnconfirmedDelivery(next.getDeliveryId,
- ActorPath.fromString(next.getDestination),
- payload(next.getPayload))
+ unconfirmedDeliveries += UnconfirmedDelivery(
+ next.getDeliveryId,
+ ActorPath.fromString(next.getDestination),
+ payload(next.getPayload))
}
AtLeastOnceDeliverySnapshot(atLeastOnceDeliverySnapshot.getCurrentDeliveryId, unconfirmedDeliveries.result())
}
def stateChange(persistentStateChange: mf.PersistentStateChangeEvent): StateChangeEvent = {
- StateChangeEvent(persistentStateChange.getStateIdentifier,
- // timeout field is deprecated, left for backward compatibility. timeoutNanos is used instead.
- if (persistentStateChange.hasTimeoutNanos)
- Some(Duration.fromNanos(persistentStateChange.getTimeoutNanos))
- else if (persistentStateChange.hasTimeout)
- Some(Duration(persistentStateChange.getTimeout).asInstanceOf[duration.FiniteDuration])
- else None)
+ StateChangeEvent(
+ persistentStateChange.getStateIdentifier,
+ // timeout field is deprecated, left for backward compatibility. timeoutNanos is used instead.
+ if (persistentStateChange.hasTimeoutNanos)
+ Some(Duration.fromNanos(persistentStateChange.getTimeoutNanos))
+ else if (persistentStateChange.hasTimeout)
+ Some(Duration(persistentStateChange.getTimeout).asInstanceOf[duration.FiniteDuration])
+ else None)
}
def persistentFSMSnapshot(persistentFSMSnapshot: mf.PersistentFSMSnapshot): PersistentFSMSnapshot[Any] = {
- PersistentFSMSnapshot(persistentFSMSnapshot.getStateIdentifier,
- payload(persistentFSMSnapshot.getData),
- if (persistentFSMSnapshot.hasTimeoutNanos)
- Some(Duration.fromNanos(persistentFSMSnapshot.getTimeoutNanos))
- else None)
+ PersistentFSMSnapshot(
+ persistentFSMSnapshot.getStateIdentifier,
+ payload(persistentFSMSnapshot.getData),
+ if (persistentFSMSnapshot.hasTimeoutNanos)
+ Some(Duration.fromNanos(persistentFSMSnapshot.getTimeoutNanos))
+ else None)
}
private def atomicWriteBuilder(a: AtomicWrite) = {
@@ -189,14 +192,15 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer
//
private def persistent(persistentMessage: mf.PersistentMessage): PersistentRepr = {
- PersistentRepr(payload(persistentMessage.getPayload),
- persistentMessage.getSequenceNr,
- if (persistentMessage.hasPersistenceId) persistentMessage.getPersistenceId else Undefined,
- if (persistentMessage.hasManifest) persistentMessage.getManifest else Undefined,
- if (persistentMessage.hasDeleted) persistentMessage.getDeleted else false,
- if (persistentMessage.hasSender) system.provider.resolveActorRef(persistentMessage.getSender)
- else Actor.noSender,
- if (persistentMessage.hasWriterUuid) persistentMessage.getWriterUuid else Undefined)
+ PersistentRepr(
+ payload(persistentMessage.getPayload),
+ persistentMessage.getSequenceNr,
+ if (persistentMessage.hasPersistenceId) persistentMessage.getPersistenceId else Undefined,
+ if (persistentMessage.hasManifest) persistentMessage.getManifest else Undefined,
+ if (persistentMessage.hasDeleted) persistentMessage.getDeleted else false,
+ if (persistentMessage.hasSender) system.provider.resolveActorRef(persistentMessage.getSender)
+ else Actor.noSender,
+ if (persistentMessage.hasWriterUuid) persistentMessage.getWriterUuid else Undefined)
}
private def atomicWrite(atomicWrite: mf.AtomicWrite): AtomicWrite = {
diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala
index 15326f92ba..e5644ddfa4 100644
--- a/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala
@@ -16,8 +16,9 @@ import scala.concurrent.Future
abstract class SnapshotStore extends SSnapshotStore with SnapshotStorePlugin {
import context.dispatcher
- override final def loadAsync(persistenceId: String,
- criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] =
+ override final def loadAsync(
+ persistenceId: String,
+ criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] =
doLoadAsync(persistenceId, criteria).map(option)
override final def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] =
diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala
index 1b6c157b64..e56c1e3398 100644
--- a/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala
@@ -39,8 +39,9 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt
private val serializationExtension = SerializationExtension(context.system)
private var saving = immutable.Set.empty[SnapshotMetadata] // saving in progress
- override def loadAsync(persistenceId: String,
- criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
+ override def loadAsync(
+ persistenceId: String,
+ criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
//
// Heuristics:
//
@@ -145,8 +146,9 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt
snapshotDir,
s"snapshot-${URLEncoder.encode(metadata.persistenceId, UTF_8)}-${metadata.sequenceNr}-${metadata.timestamp}${extension}")
- private def snapshotMetadatas(persistenceId: String,
- criteria: SnapshotSelectionCriteria): immutable.Seq[SnapshotMetadata] = {
+ private def snapshotMetadatas(
+ persistenceId: String,
+ criteria: SnapshotSelectionCriteria): immutable.Seq[SnapshotMetadata] = {
val files = snapshotDir.listFiles(new SnapshotFilenameFilter(persistenceId))
if (files eq null) Nil // if the dir was removed
else {
diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala
index 503c875847..a2fc219661 100644
--- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala
@@ -29,32 +29,35 @@ object AtLeastOnceDeliverySpec {
case object SaveSnap
case class Snap(deliverySnapshot: AtLeastOnceDeliverySnapshot) // typically includes some user data as well
- def senderProps(testActor: ActorRef,
- name: String,
- redeliverInterval: FiniteDuration,
- warnAfterNumberOfUnconfirmedAttempts: Int,
- redeliveryBurstLimit: Int,
- destinations: Map[String, ActorPath],
- async: Boolean,
- actorSelectionDelivery: Boolean = false): Props =
+ def senderProps(
+ testActor: ActorRef,
+ name: String,
+ redeliverInterval: FiniteDuration,
+ warnAfterNumberOfUnconfirmedAttempts: Int,
+ redeliveryBurstLimit: Int,
+ destinations: Map[String, ActorPath],
+ async: Boolean,
+ actorSelectionDelivery: Boolean = false): Props =
Props(
- new Sender(testActor,
- name,
- redeliverInterval,
- warnAfterNumberOfUnconfirmedAttempts,
- redeliveryBurstLimit,
- destinations,
- async,
- actorSelectionDelivery))
+ new Sender(
+ testActor,
+ name,
+ redeliverInterval,
+ warnAfterNumberOfUnconfirmedAttempts,
+ redeliveryBurstLimit,
+ destinations,
+ async,
+ actorSelectionDelivery))
- class Sender(testActor: ActorRef,
- name: String,
- override val redeliverInterval: FiniteDuration,
- override val warnAfterNumberOfUnconfirmedAttempts: Int,
- override val redeliveryBurstLimit: Int,
- destinations: Map[String, ActorPath],
- async: Boolean,
- actorSelectionDelivery: Boolean)
+ class Sender(
+ testActor: ActorRef,
+ name: String,
+ override val redeliverInterval: FiniteDuration,
+ override val warnAfterNumberOfUnconfirmedAttempts: Int,
+ override val redeliveryBurstLimit: Int,
+ destinations: Map[String, ActorPath],
+ async: Boolean,
+ actorSelectionDelivery: Boolean)
extends PersistentActor
with AtLeastOnceDelivery
with ActorLogging {
@@ -206,15 +209,17 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val probeA = TestProbe()
val dst = system.actorOf(destinationProps(probeA.ref))
val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path)
- val snd = system.actorOf(senderProps(probe.ref,
- name,
- 2.seconds,
- 5,
- 1000,
- destinations,
- async = false,
- actorSelectionDelivery = deliverUsingActorSelection),
- name)
+ val snd = system.actorOf(
+ senderProps(
+ probe.ref,
+ name,
+ 2.seconds,
+ 5,
+ 1000,
+ destinations,
+ async = false,
+ actorSelectionDelivery = deliverUsingActorSelection),
+ name)
snd.tell(Req("a-1"), probe.ref)
probe.expectMsg(ReqAck)
probeA.expectMsg(Action(1, "a-1"))
@@ -304,8 +309,9 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
// and then re-delivered
probeA.expectMsg(Action(2, "a-2")) // re-delivered
// a-4 was re-delivered but lost
- probeA.expectMsgAllOf(Action(5, "a-5"), // re-delivered
- Action(4, "a-4")) // re-delivered, 3rd time
+ probeA.expectMsgAllOf(
+ Action(5, "a-5"), // re-delivered
+ Action(4, "a-4")) // re-delivered, 3rd time
probeA.expectNoMsg(1.second)
}
@@ -378,9 +384,10 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val dstA = system.actorOf(destinationProps(probeA.ref), "destination-a")
val dstB = system.actorOf(destinationProps(probeB.ref), "destination-b")
val dstC = system.actorOf(destinationProps(probeC.ref), "destination-c")
- val destinations = Map("A" -> system.actorOf(unreliableProps(2, dstA), "unreliable-a").path,
- "B" -> system.actorOf(unreliableProps(5, dstB), "unreliable-b").path,
- "C" -> system.actorOf(unreliableProps(3, dstC), "unreliable-c").path)
+ val destinations = Map(
+ "A" -> system.actorOf(unreliableProps(2, dstA), "unreliable-a").path,
+ "B" -> system.actorOf(unreliableProps(5, dstB), "unreliable-b").path,
+ "C" -> system.actorOf(unreliableProps(3, dstC), "unreliable-c").path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = true), name)
val N = 100
for (n <- 1 to N) {
diff --git a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala
index 5b982a029a..f795bd5141 100644
--- a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala
@@ -102,9 +102,10 @@ abstract class EventAdapterSpec(journalName: String, journalConfig: Config, adap
import EventAdapterSpec._
def this(journalName: String) {
- this("inmem",
- PersistenceSpec.config("inmem", "InmemPersistentTaggingSpec"),
- ConfigFactory.parseString(s"""
+ this(
+ "inmem",
+ PersistenceSpec.config("inmem", "InmemPersistentTaggingSpec"),
+ ConfigFactory.parseString(s"""
|akka.persistence.journal {
|
| common-event-adapters {
diff --git a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala
index e7e8f03b31..b9d007a922 100644
--- a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala
@@ -46,10 +46,11 @@ object EventSourcedActorDeleteFailureSpec {
class EventSourcedActorDeleteFailureSpec
extends PersistenceSpec(
- PersistenceSpec.config("inmem",
- "SnapshotFailureRobustnessSpec",
- extraConfig = Some(
- """
+ PersistenceSpec.config(
+ "inmem",
+ "SnapshotFailureRobustnessSpec",
+ extraConfig = Some(
+ """
akka.persistence.journal.inmem.class = "akka.persistence.EventSourcedActorDeleteFailureSpec$DeleteFailingInmemJournal"
""")))
with ImplicitSender {
diff --git a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala
index 3f06922c4a..185601f72a 100644
--- a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala
@@ -142,10 +142,11 @@ object EventSourcedActorFailureSpec {
class EventSourcedActorFailureSpec
extends PersistenceSpec(
- PersistenceSpec.config("inmem",
- "SnapshotFailureRobustnessSpec",
- extraConfig = Some(
- """
+ PersistenceSpec.config(
+ "inmem",
+ "SnapshotFailureRobustnessSpec",
+ extraConfig = Some(
+ """
akka.persistence.journal.inmem.class = "akka.persistence.EventSourcedActorFailureSpec$FailingInmemJournal"
""")))
with ImplicitSender {
diff --git a/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala b/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala
index 4c3c51b86f..ed2093b3cc 100644
--- a/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala
@@ -22,10 +22,10 @@ object LoadJournalSpec {
class LoadJournalSpec
extends PersistenceSpec(
- PersistenceSpec.config("inmem",
- "LoadJournalSpec",
- extraConfig = Some(
- """
+ PersistenceSpec.config(
+ "inmem",
+ "LoadJournalSpec",
+ extraConfig = Some("""
akka.persistence.journal.inmem.class = "akka.persistence.LoadJournalSpec$JournalWithConfig"
akka.persistence.journal.inmem.extra-property = 17
""")))
diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala
index ab15b0f9f8..1170598a7f 100644
--- a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala
@@ -78,9 +78,10 @@ object PersistenceSpec {
trait Cleanup { this: AkkaSpec =>
val storageLocations =
- List("akka.persistence.journal.leveldb.dir",
- "akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
+ List(
+ "akka.persistence.journal.leveldb.dir",
+ "akka.persistence.journal.leveldb-shared.store.dir",
+ "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))
override protected def atStartup(): Unit = {
storageLocations.foreach(FileUtils.deleteDirectory)
@@ -117,9 +118,10 @@ trait PersistenceMatchers {
sortedNrs = nrs.sorted
if nrs != sortedNrs
} yield
- MatchResult(false,
- s"""Messages sequence with prefix ${prefixes(pos)} was not sorted! Was: $seq"""",
- s"""Messages sequence with prefix ${prefixes(pos)} was sorted! Was: $seq"""")
+ MatchResult(
+ false,
+ s"""Messages sequence with prefix ${prefixes(pos)} was not sorted! Was: $seq"""",
+ s"""Messages sequence with prefix ${prefixes(pos)} was sorted! Was: $seq"""")
if (results.forall(_.matches)) MatchResult(true, "", "")
else results.find(r => !r.matches).get
diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala
index 94164e486b..affdc63e38 100644
--- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala
@@ -62,9 +62,10 @@ class SteppingInMemPersistentActorBoundedStashingSpec(strategyConfig: String)
SteppingInmemJournal
.config("persistence-bounded-stash")
.withFallback(
- PersistenceSpec.config("stepping-inmem",
- "SteppingInMemPersistentActorBoundedStashingSpec",
- extraConfig = Some(strategyConfig))))
+ PersistenceSpec.config(
+ "stepping-inmem",
+ "SteppingInMemPersistentActorBoundedStashingSpec",
+ extraConfig = Some(strategyConfig))))
with BeforeAndAfterEach
with ImplicitSender {
diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala
index 7ec8376c0b..62be70c35a 100644
--- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala
@@ -152,12 +152,14 @@ object PersistentActorSpec {
}
}
}
- class ChangeBehaviorInLastEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInLastEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInLastEventHandlerPersistentActor(name)
with LevelDbRuntimePluginConfig
- class ChangeBehaviorInLastEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInLastEventHandlerPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInLastEventHandlerPersistentActor(name)
with InmemRuntimePluginConfig
@@ -179,12 +181,14 @@ object PersistentActorSpec {
}
}
}
- class ChangeBehaviorInFirstEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInFirstEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInFirstEventHandlerPersistentActor(name)
with LevelDbRuntimePluginConfig
- class ChangeBehaviorInFirstEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInFirstEventHandlerPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInFirstEventHandlerPersistentActor(name)
with InmemRuntimePluginConfig
@@ -202,12 +206,14 @@ object PersistentActorSpec {
persist(Evt(s"${data}-0"))(updateState)
}
}
- class ChangeBehaviorInCommandHandlerFirstPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInCommandHandlerFirstPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInCommandHandlerFirstPersistentActor(name)
with LevelDbRuntimePluginConfig
- class ChangeBehaviorInCommandHandlerFirstPersistentActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInCommandHandlerFirstPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInCommandHandlerFirstPersistentActor(name)
with InmemRuntimePluginConfig
@@ -225,12 +231,14 @@ object PersistentActorSpec {
context.become(newBehavior)
}
}
- class ChangeBehaviorInCommandHandlerLastPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInCommandHandlerLastPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInCommandHandlerLastPersistentActor(name)
with LevelDbRuntimePluginConfig
- class ChangeBehaviorInCommandHandlerLastPersistentActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class ChangeBehaviorInCommandHandlerLastPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends ChangeBehaviorInCommandHandlerLastPersistentActor(name)
with InmemRuntimePluginConfig
@@ -251,14 +259,16 @@ object PersistentActorSpec {
case "snap" => saveSnapshot(events)
}
}
- class SnapshottingPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class SnapshottingPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends SnapshottingPersistentActor(name, probe)
with LevelDbRuntimePluginConfig
- class SnapshottingPersistentActorWithInmemRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class SnapshottingPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends SnapshottingPersistentActor(name, probe)
with InmemRuntimePluginConfig
@@ -279,14 +289,16 @@ object PersistentActorSpec {
case "It's changing me" => probe ! "I am becoming"
}
}
- class SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends SnapshottingBecomingPersistentActor(name, probe)
with LevelDbRuntimePluginConfig
- class SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends SnapshottingBecomingPersistentActor(name, probe)
with InmemRuntimePluginConfig
@@ -378,8 +390,9 @@ object PersistentActorSpec {
}
}
}
- class AsyncPersistSameEventTwicePersistentActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class AsyncPersistSameEventTwicePersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends AsyncPersistSameEventTwicePersistentActor(name)
with LevelDbRuntimePluginConfig
class AsyncPersistSameEventTwicePersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config)
@@ -441,8 +454,9 @@ object PersistentActorSpec {
val providedConfig: Config)
extends AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor(name)
with LevelDbRuntimePluginConfig
- class AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor(name)
with InmemRuntimePluginConfig
@@ -468,12 +482,14 @@ object PersistentActorSpec {
sendMsgCounter
}
}
- class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends AsyncPersistAndPersistMixedSyncAsyncPersistentActor(name)
with LevelDbRuntimePluginConfig
- class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends AsyncPersistAndPersistMixedSyncAsyncPersistentActor(name)
with InmemRuntimePluginConfig
@@ -535,14 +551,16 @@ object PersistentActorSpec {
}
}
- class HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends HandleRecoveryFinishedEventPersistentActor(name, probe)
with LevelDbRuntimePluginConfig
- class HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends HandleRecoveryFinishedEventPersistentActor(name, probe)
with InmemRuntimePluginConfig
@@ -624,20 +642,24 @@ object PersistentActorSpec {
class DeferringSyncMixedCallsPPADDPADPersistActor(name: String)
extends DeferringMixedCallsPPADDPADPersistActor(name)
with DeferSync
- class DeferringAsyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringAsyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringAsyncMixedCallsPPADDPADPersistActor(name)
with LevelDbRuntimePluginConfig
- class DeferringSyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringSyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringSyncMixedCallsPPADDPADPersistActor(name)
with LevelDbRuntimePluginConfig
- class DeferringAsyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringAsyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringAsyncMixedCallsPPADDPADPersistActor(name)
with InmemRuntimePluginConfig
- class DeferringSyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringSyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringSyncMixedCallsPPADDPADPersistActor(name)
with InmemRuntimePluginConfig
@@ -657,20 +679,24 @@ object PersistentActorSpec {
class DeferringSyncWithNoPersistCallsPersistActor(name: String)
extends DeferringWithNoPersistCallsPersistActor(name)
with DeferSync
- class DeferringAsyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringAsyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringAsyncWithNoPersistCallsPersistActor(name)
with LevelDbRuntimePluginConfig
- class DeferringSyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringSyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringSyncWithNoPersistCallsPersistActor(name)
with LevelDbRuntimePluginConfig
- class DeferringAsyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringAsyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringAsyncWithNoPersistCallsPersistActor(name)
with InmemRuntimePluginConfig
- class DeferringSyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(name: String,
- val providedConfig: Config)
+ class DeferringSyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(
+ name: String,
+ val providedConfig: Config)
extends DeferringSyncWithNoPersistCallsPersistActor(name)
with InmemRuntimePluginConfig
@@ -766,9 +792,10 @@ object PersistentActorSpec {
}
}
}
- class MultipleAndNestedPersistsWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class MultipleAndNestedPersistsWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends MultipleAndNestedPersists(name, probe)
with LevelDbRuntimePluginConfig
class MultipleAndNestedPersistsWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config)
@@ -793,14 +820,16 @@ object PersistentActorSpec {
}
}
}
- class MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends MultipleAndNestedPersistAsyncs(name, probe)
with LevelDbRuntimePluginConfig
- class MultipleAndNestedPersistAsyncsWithInmemRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class MultipleAndNestedPersistAsyncsWithInmemRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends MultipleAndNestedPersistAsyncs(name, probe)
with InmemRuntimePluginConfig
@@ -825,16 +854,18 @@ object PersistentActorSpec {
persistAsync(s + "-" + 1)(weMustGoDeeper)
}
}
- class DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig(name: String,
- maxDepth: Int,
- probe: ActorRef,
- val providedConfig: Config)
+ class DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig(
+ name: String,
+ maxDepth: Int,
+ probe: ActorRef,
+ val providedConfig: Config)
extends DeeplyNestedPersistAsyncs(name, maxDepth, probe)
with LevelDbRuntimePluginConfig
- class DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig(name: String,
- maxDepth: Int,
- probe: ActorRef,
- val providedConfig: Config)
+ class DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig(
+ name: String,
+ maxDepth: Int,
+ probe: ActorRef,
+ val providedConfig: Config)
extends DeeplyNestedPersistAsyncs(name, maxDepth, probe)
with InmemRuntimePluginConfig
@@ -856,14 +887,16 @@ object PersistentActorSpec {
}
}
}
- class NestedPersistNormalAndAsyncsWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class NestedPersistNormalAndAsyncsWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends NestedPersistNormalAndAsyncs(name, probe)
with LevelDbRuntimePluginConfig
- class NestedPersistNormalAndAsyncsWithInmemRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class NestedPersistNormalAndAsyncsWithInmemRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends NestedPersistNormalAndAsyncs(name, probe)
with InmemRuntimePluginConfig
@@ -885,14 +918,16 @@ object PersistentActorSpec {
}
}
}
- class NestedPersistAsyncsAndNormalWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class NestedPersistAsyncsAndNormalWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends NestedPersistAsyncsAndNormal(name, probe)
with LevelDbRuntimePluginConfig
- class NestedPersistAsyncsAndNormalWithInmemRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class NestedPersistAsyncsAndNormalWithInmemRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends NestedPersistAsyncsAndNormal(name, probe)
with InmemRuntimePluginConfig
@@ -911,14 +946,16 @@ object PersistentActorSpec {
}
}
}
- class NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends NestedPersistInAsyncEnforcesStashing(name, probe)
with LevelDbRuntimePluginConfig
- class NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig(name: String,
- probe: ActorRef,
- val providedConfig: Config)
+ class NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig(
+ name: String,
+ probe: ActorRef,
+ val providedConfig: Config)
extends NestedPersistInAsyncEnforcesStashing(name, probe)
with InmemRuntimePluginConfig
@@ -943,16 +980,18 @@ object PersistentActorSpec {
persist(s + "-" + 1)(weMustGoDeeper)
}
}
- class DeeplyNestedPersistsWithLevelDbRuntimePluginConfig(name: String,
- maxDepth: Int,
- probe: ActorRef,
- val providedConfig: Config)
+ class DeeplyNestedPersistsWithLevelDbRuntimePluginConfig(
+ name: String,
+ maxDepth: Int,
+ probe: ActorRef,
+ val providedConfig: Config)
extends DeeplyNestedPersists(name, maxDepth, probe)
with LevelDbRuntimePluginConfig
- class DeeplyNestedPersistsWithInmemRuntimePluginConfig(name: String,
- maxDepth: Int,
- probe: ActorRef,
- val providedConfig: Config)
+ class DeeplyNestedPersistsWithInmemRuntimePluginConfig(
+ name: String,
+ maxDepth: Int,
+ probe: ActorRef,
+ val providedConfig: Config)
extends DeeplyNestedPersists(name, maxDepth, probe)
with InmemRuntimePluginConfig
@@ -1808,10 +1847,12 @@ class LeveldbPersistentActorWithRuntimePluginConfigSpec
| custom.persistence.journal.leveldb.dir = target/journal-LeveldbPersistentActorWithRuntimePluginConfigSpec
| custom.persistence.snapshot-store.local.dir = target/snapshots-LeveldbPersistentActorWithRuntimePluginConfigSpec/
""".stripMargin)
- .withValue(s"custom.persistence.journal.leveldb",
- system.settings.config.getValue(s"akka.persistence.journal.leveldb"))
- .withValue("custom.persistence.snapshot-store.local",
- system.settings.config.getValue("akka.persistence.snapshot-store.local"))
+ .withValue(
+ s"custom.persistence.journal.leveldb",
+ system.settings.config.getValue(s"akka.persistence.journal.leveldb"))
+ .withValue(
+ "custom.persistence.snapshot-store.local",
+ system.settings.config.getValue("akka.persistence.snapshot-store.local"))
}
override protected def behavior1PersistentActor: ActorRef =
@@ -1845,10 +1886,11 @@ class LeveldbPersistentActorWithRuntimePluginConfigSpec
override protected def snapshottingBecomingPersistentActor: ActorRef =
system.actorOf(
- Props(classOf[SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig],
- name,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig],
+ name,
+ testActor,
+ providedActorConfig))
override protected def replyInEventHandlerPersistentActor: ActorRef =
namedPersistentActorWithProvidedConfig[ReplyInEventHandlerPersistentActorWithLevelDbRuntimePluginConfig](
@@ -1888,10 +1930,11 @@ class LeveldbPersistentActorWithRuntimePluginConfigSpec
override protected def handleRecoveryFinishedEventPersistentActor: ActorRef =
system.actorOf(
- Props(classOf[HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig],
- name,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig],
+ name,
+ testActor,
+ providedActorConfig))
override protected def stressOrdering: ActorRef =
namedPersistentActorWithProvidedConfig[StressOrderingWithLevelDbRuntimePluginConfig](providedActorConfig)
@@ -1906,26 +1949,29 @@ class LeveldbPersistentActorWithRuntimePluginConfigSpec
override protected def multipleAndNestedPersistAsyncs: ActorRef =
system.actorOf(
- Props(classOf[MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig],
- name,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig],
+ name,
+ testActor,
+ providedActorConfig))
override protected def deeplyNestedPersists(nestedPersists: Int): ActorRef =
system.actorOf(
- Props(classOf[DeeplyNestedPersistsWithLevelDbRuntimePluginConfig],
- name,
- nestedPersists,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[DeeplyNestedPersistsWithLevelDbRuntimePluginConfig],
+ name,
+ nestedPersists,
+ testActor,
+ providedActorConfig))
override protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef =
system.actorOf(
- Props(classOf[DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig],
- name,
- nestedPersistAsyncs,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig],
+ name,
+ nestedPersistAsyncs,
+ testActor,
+ providedActorConfig))
override protected def nestedPersistNormalAndAsyncs: ActorRef =
system.actorOf(
@@ -1937,10 +1983,11 @@ class LeveldbPersistentActorWithRuntimePluginConfigSpec
override protected def nestedPersistInAsyncEnforcesStashing: ActorRef =
system.actorOf(
- Props(classOf[NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig],
- name,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig],
+ name,
+ testActor,
+ providedActorConfig))
override protected def persistInRecovery: ActorRef =
namedPersistentActorWithProvidedConfig[PersistInRecoveryWithLevelDbRuntimePluginConfig](providedActorConfig)
@@ -2000,10 +2047,12 @@ class InmemPersistentActorWithRuntimePluginConfigSpec
.parseString(s"""
| custom.persistence.snapshot-store.local.dir = target/snapshots-InmemPersistentActorWithRuntimePluginConfigSpec/
""".stripMargin)
- .withValue(s"custom.persistence.journal.inmem",
- system.settings.config.getValue(s"akka.persistence.journal.inmem"))
- .withValue("custom.persistence.snapshot-store.local",
- system.settings.config.getValue("akka.persistence.snapshot-store.local"))
+ .withValue(
+ s"custom.persistence.journal.inmem",
+ system.settings.config.getValue(s"akka.persistence.journal.inmem"))
+ .withValue(
+ "custom.persistence.snapshot-store.local",
+ system.settings.config.getValue("akka.persistence.snapshot-store.local"))
}
override protected def behavior1PersistentActor: ActorRef =
@@ -2037,10 +2086,11 @@ class InmemPersistentActorWithRuntimePluginConfigSpec
override protected def snapshottingBecomingPersistentActor: ActorRef =
system.actorOf(
- Props(classOf[SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig],
- name,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig],
+ name,
+ testActor,
+ providedActorConfig))
override protected def replyInEventHandlerPersistentActor: ActorRef =
namedPersistentActorWithProvidedConfig[ReplyInEventHandlerPersistentActorWithInmemRuntimePluginConfig](
@@ -2078,10 +2128,11 @@ class InmemPersistentActorWithRuntimePluginConfigSpec
override protected def handleRecoveryFinishedEventPersistentActor: ActorRef =
system.actorOf(
- Props(classOf[HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig],
- name,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig],
+ name,
+ testActor,
+ providedActorConfig))
override protected def stressOrdering: ActorRef =
namedPersistentActorWithProvidedConfig[StressOrderingWithInmemRuntimePluginConfig](providedActorConfig)
@@ -2100,19 +2151,21 @@ class InmemPersistentActorWithRuntimePluginConfigSpec
override protected def deeplyNestedPersists(nestedPersists: Int): ActorRef =
system.actorOf(
- Props(classOf[DeeplyNestedPersistsWithInmemRuntimePluginConfig],
- name,
- nestedPersists,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[DeeplyNestedPersistsWithInmemRuntimePluginConfig],
+ name,
+ nestedPersists,
+ testActor,
+ providedActorConfig))
override protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef =
system.actorOf(
- Props(classOf[DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig],
- name,
- nestedPersistAsyncs,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig],
+ name,
+ nestedPersistAsyncs,
+ testActor,
+ providedActorConfig))
override protected def nestedPersistNormalAndAsyncs: ActorRef =
system.actorOf(
@@ -2124,10 +2177,11 @@ class InmemPersistentActorWithRuntimePluginConfigSpec
override protected def nestedPersistInAsyncEnforcesStashing: ActorRef =
system.actorOf(
- Props(classOf[NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig],
- name,
- testActor,
- providedActorConfig))
+ Props(
+ classOf[NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig],
+ name,
+ testActor,
+ providedActorConfig))
override protected def persistInRecovery: ActorRef =
namedPersistentActorWithProvidedConfig[PersistInRecoveryWithInmemRuntimePluginConfig](providedActorConfig)
diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala
index 4f2ec8d9b3..8a20997177 100644
--- a/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala
@@ -30,9 +30,10 @@ object SnapshotDirectoryFailureSpec {
class SnapshotDirectoryFailureSpec
extends AkkaSpec(
- PersistenceSpec.config("leveldb",
- "SnapshotDirectoryFailureSpec",
- extraConfig = Some(s"""
+ PersistenceSpec.config(
+ "leveldb",
+ "SnapshotDirectoryFailureSpec",
+ extraConfig = Some(s"""
akka.persistence.snapshot-store.local.dir = "${SnapshotDirectoryFailureSpec.inUseSnapshotPath}"
""")))
with ImplicitSender {
diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala
index 9b02f08b64..fb7557691c 100644
--- a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala
@@ -97,10 +97,11 @@ object SnapshotFailureRobustnessSpec {
class SnapshotFailureRobustnessSpec
extends PersistenceSpec(
- PersistenceSpec.config("leveldb",
- "SnapshotFailureRobustnessSpec",
- serialization = "off",
- extraConfig = Some(s"""
+ PersistenceSpec.config(
+ "leveldb",
+ "SnapshotFailureRobustnessSpec",
+ serialization = "off",
+ extraConfig = Some(s"""
akka.persistence.snapshot-store.local.class = "akka.persistence.SnapshotFailureRobustnessSpec$$FailingLocalSnapshotStore"
akka.persistence.snapshot-store.local-delete-fail = $${akka.persistence.snapshot-store.local}
akka.persistence.snapshot-store.local-delete-fail.class = "akka.persistence.SnapshotFailureRobustnessSpec$$DeleteFailingLocalSnapshotStore"
diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala
index 53abff2577..daef8d5882 100644
--- a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala
@@ -66,11 +66,11 @@ object SnapshotSerializationSpec {
class SnapshotSerializationSpec
extends PersistenceSpec(
- PersistenceSpec.config("leveldb",
- "SnapshotSerializationSpec",
- serialization = "off",
- extraConfig =
- Some("""
+ PersistenceSpec.config(
+ "leveldb",
+ "SnapshotSerializationSpec",
+ serialization = "off",
+ extraConfig = Some("""
akka.actor {
serializers {
my-snapshot = "akka.persistence.SnapshotSerializationSpec$MySerializer"
diff --git a/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala b/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala
index fb280208c3..d034449fd0 100644
--- a/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala
@@ -361,8 +361,9 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config)
}
"save periodical snapshots if akka.persistence.fsm.enable-snapshot-after = on" in {
- val sys2 = ActorSystem("PersistentFsmSpec2",
- ConfigFactory.parseString("""
+ val sys2 = ActorSystem(
+ "PersistentFsmSpec2",
+ ConfigFactory.parseString("""
akka.persistence.fsm.enable-snapshot-after = on
akka.persistence.fsm.snapshot-after = 3
""").withFallback(PersistenceSpec.config("leveldb", "PersistentFSMSpec2")))
@@ -386,9 +387,10 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config)
} finally {
val storageLocations =
- List("akka.persistence.journal.leveldb.dir",
- "akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s => new File(sys2.settings.config.getString(s)))
+ List(
+ "akka.persistence.journal.leveldb.dir",
+ "akka.persistence.journal.leveldb-shared.store.dir",
+ "akka.persistence.snapshot-store.local.dir").map(s => new File(sys2.settings.config.getString(s)))
shutdown(sys2)
storageLocations.foreach(FileUtils.deleteDirectory)
}
diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala
index d0bd41d94b..93a5d1906f 100644
--- a/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala
@@ -105,8 +105,9 @@ class InmemEventAdaptersSpec extends AkkaSpec {
// combined-read-side only adapter
val r: EventAdapter = adapters.get(classOf[ReadMeTwiceEvent])
- r.fromJournal(r.toJournal(ReadMeTwiceEvent()), "").events.map(_.toString) shouldBe Seq("from-ReadMeTwiceEvent()",
- "again-ReadMeTwiceEvent()")
+ r.fromJournal(r.toJournal(ReadMeTwiceEvent()), "").events.map(_.toString) shouldBe Seq(
+ "from-ReadMeTwiceEvent()",
+ "again-ReadMeTwiceEvent()")
}
}
diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala
index 6449503a12..377dd0fbd0 100644
--- a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala
@@ -144,9 +144,10 @@ object JournalCompactionSpec {
class SpecComponentBuilder(val specId: String, val compactionInterval: Long) {
def config: Config = {
- PersistenceSpec.config("leveldb",
- specId,
- extraConfig = Some(s"""
+ PersistenceSpec.config(
+ "leveldb",
+ specId,
+ extraConfig = Some(s"""
| akka.persistence.journal.leveldb.compaction-intervals.$specId = $compactionInterval
""".stripMargin))
}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala
index 13d8b21eee..7cb7730cf6 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala
@@ -95,9 +95,8 @@ abstract class RemoteDeploymentDeathWatchSpec(multiNodeConfig: RemoteDeploymentD
catch {
case _: TimeoutException =>
fail(
- "Failed to stop [%s] within [%s] \n%s".format(system.name,
- timeout,
- system.asInstanceOf[ActorSystemImpl].printTree))
+ "Failed to stop [%s] within [%s] \n%s"
+ .format(system.name, timeout, system.asInstanceOf[ActorSystemImpl].printTree))
}
}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala
index f10bc1a854..cadad6c606 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala
@@ -70,9 +70,10 @@ abstract class RemoteGatePiercingSpec extends RemotingMultiNodeSpec(RemoteGatePi
enterBarrier("actors-communicate")
EventFilter.warning(pattern = "address is now gated", occurrences = 1).intercept {
- Await.result(RARP(system).provider.transport.managementCommand(
- ForceDisassociateExplicitly(node(second).address, AssociationHandle.Unknown)),
- 3.seconds)
+ Await.result(
+ RARP(system).provider.transport
+ .managementCommand(ForceDisassociateExplicitly(node(second).address, AssociationHandle.Unknown)),
+ 3.seconds)
}
enterBarrier("gated")
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala
index 0aa72db235..cf3ab1186d 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala
@@ -107,8 +107,9 @@ abstract class RemoteNodeRestartDeathWatchSpec(multiNodeConfig: RemoteNodeRestar
Await.ready(system.whenTerminated, 30.seconds)
- val freshSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val freshSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.netty.tcp.port = ${address.port.get}
akka.remote.artery.canonical.port = ${address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala
index 40664158de..7134430b1b 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala
@@ -68,9 +68,10 @@ abstract class RemoteNodeRestartGateSpec extends RemotingMultiNodeSpec(RemoteNod
identify(second, "subject")
EventFilter.warning(pattern = "address is now gated", occurrences = 1).intercept {
- Await.result(RARP(system).provider.transport.managementCommand(
- ForceDisassociateExplicitly(node(second).address, AssociationHandle.Unknown)),
- 3.seconds)
+ Await.result(
+ RARP(system).provider.transport
+ .managementCommand(ForceDisassociateExplicitly(node(second).address, AssociationHandle.Unknown)),
+ 3.seconds)
}
enterBarrier("gated")
@@ -95,8 +96,9 @@ abstract class RemoteNodeRestartGateSpec extends RemotingMultiNodeSpec(RemoteNod
Await.ready(system.whenTerminated, 10.seconds)
- val freshSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val freshSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.retry-gate-closed-for = 0.5 s
akka.remote.netty.tcp {
hostname = ${address.host.get}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala
index bc95e8945d..ca557b7587 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala
@@ -81,8 +81,9 @@ abstract class RemoteNodeShutdownAndComesBackSpec extends RemotingMultiNodeSpec(
// Drop all messages from this point so no SHUTDOWN is ever received
testConductor.blackhole(second, first, Direction.Send).await
// Shut down all existing connections so that the system can enter recovery mode (association attempts)
- Await.result(RARP(system).provider.transport.managementCommand(ForceDisassociate(node(second).address)),
- 3.seconds)
+ Await.result(
+ RARP(system).provider.transport.managementCommand(ForceDisassociate(node(second).address)),
+ 3.seconds)
// Trigger reconnect attempt and also queue up a system message to be in limbo state (UID of remote system
// is unknown, and system message is pending)
@@ -133,8 +134,9 @@ abstract class RemoteNodeShutdownAndComesBackSpec extends RemotingMultiNodeSpec(
Await.ready(system.whenTerminated, 30.seconds)
- val freshSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val freshSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.netty.tcp.port = ${address.port.get}
akka.remote.artery.canonical.port = ${address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala
index ab839128ec..8802f5e769 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala
@@ -51,9 +51,10 @@ abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: RemoteQuarantinePie
override def initialParticipants = roles.size
- def identifyWithUid(role: RoleName,
- actorName: String,
- timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = {
+ def identifyWithUid(
+ role: RoleName,
+ actorName: String,
+ timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = {
within(timeout) {
system.actorSelection(node(role) / "user" / actorName) ! "identify"
expectMsgType[(Long, ActorRef)]
@@ -107,8 +108,9 @@ abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: RemoteQuarantinePie
Await.ready(system.whenTerminated, 30.seconds)
- val freshSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val freshSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.netty.tcp.port = ${address.port.get}
akka.remote.artery.canonical.port = ${address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala
index ed9d1c0130..067919681f 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala
@@ -115,8 +115,9 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo
Await.result(system.whenTerminated, 10.seconds)
- val freshSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val freshSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.retry-gate-closed-for = 0.5 s
akka.remote.netty.tcp {
hostname = ${address.host.get}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala
index c9044a1268..b7c4c4ee86 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala
@@ -86,9 +86,10 @@ abstract class Ticket15109Spec extends RemotingMultiNodeSpec(Ticket15109Spec) {
runOn(second) {
// Force a disassociation. Using the message Shutdown, which is suboptimal here, but this is the only
// DisassociateInfo that triggers the code-path we want to test
- Await.result(RARP(system).provider.transport
- .managementCommand(ForceDisassociateExplicitly(node(first).address, AssociationHandle.Shutdown)),
- 3.seconds)
+ Await.result(
+ RARP(system).provider.transport
+ .managementCommand(ForceDisassociateExplicitly(node(first).address, AssociationHandle.Shutdown)),
+ 3.seconds)
}
enterBarrier("disassociated")
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala
index cc37f224f5..196f00c0b8 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala
@@ -52,20 +52,21 @@ object BenchmarkFileReporter {
reportResults(s"Git commit: $gitCommit")
val settingsToReport =
- Seq("akka.test.MaxThroughputSpec.totalMessagesFactor",
- "akka.test.MaxThroughputSpec.real-message",
- "akka.test.LatencySpec.totalMessagesFactor",
- "akka.test.LatencySpec.repeatCount",
- "akka.test.LatencySpec.real-message",
- "akka.remote.artery.enabled",
- "akka.remote.artery.advanced.inbound-lanes",
- "akka.remote.artery.advanced.idle-cpu-level",
- "akka.remote.artery.advanced.buffer-pool-size",
- "akka.remote.artery.advanced.embedded-media-driver",
- "akka.remote.default-remote-dispatcher.throughput",
- "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-factor",
- "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-min",
- "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-max")
+ Seq(
+ "akka.test.MaxThroughputSpec.totalMessagesFactor",
+ "akka.test.MaxThroughputSpec.real-message",
+ "akka.test.LatencySpec.totalMessagesFactor",
+ "akka.test.LatencySpec.repeatCount",
+ "akka.test.LatencySpec.real-message",
+ "akka.remote.artery.enabled",
+ "akka.remote.artery.advanced.inbound-lanes",
+ "akka.remote.artery.advanced.idle-cpu-level",
+ "akka.remote.artery.advanced.buffer-pool-size",
+ "akka.remote.artery.advanced.embedded-media-driver",
+ "akka.remote.default-remote-dispatcher.throughput",
+ "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-factor",
+ "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-min",
+ "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-max")
settingsToReport.foreach(reportSetting)
def reportResults(result: String): Unit = synchronized {
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala
index 9fd1d67388..4c5e1e2e7a 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala
@@ -89,30 +89,34 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput
}
val scenarios = List(
- TestSettings(testName = "warmup",
- totalMessages = adjustedTotalMessages(20000),
- burstSize = 1000,
- payloadSize = 100,
- senderReceiverPairs = senderReceiverPairs,
- realMessage),
- TestSettings(testName = "size-100",
- totalMessages = adjustedTotalMessages(50000),
- burstSize = 1000,
- payloadSize = 100,
- senderReceiverPairs = senderReceiverPairs,
- realMessage),
- TestSettings(testName = "size-1k",
- totalMessages = adjustedTotalMessages(10000),
- burstSize = 1000,
- payloadSize = 1000,
- senderReceiverPairs = senderReceiverPairs,
- realMessage),
- TestSettings(testName = "size-10k",
- totalMessages = adjustedTotalMessages(2000),
- burstSize = 1000,
- payloadSize = 10000,
- senderReceiverPairs = senderReceiverPairs,
- realMessage))
+ TestSettings(
+ testName = "warmup",
+ totalMessages = adjustedTotalMessages(20000),
+ burstSize = 1000,
+ payloadSize = 100,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage),
+ TestSettings(
+ testName = "size-100",
+ totalMessages = adjustedTotalMessages(50000),
+ burstSize = 1000,
+ payloadSize = 100,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage),
+ TestSettings(
+ testName = "size-1k",
+ totalMessages = adjustedTotalMessages(10000),
+ burstSize = 1000,
+ payloadSize = 1000,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage),
+ TestSettings(
+ testName = "size-10k",
+ totalMessages = adjustedTotalMessages(2000),
+ burstSize = 1000,
+ payloadSize = 10000,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage))
def test(testSettings: TestSettings, resultReporter: BenchmarkFileReporter): Unit = {
import testSettings._
@@ -125,8 +129,9 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput
runOn(roles.head) {
val rep = reporter(testName)
val receivers = (1 to sendingNodes.size).map { n =>
- system.actorOf(receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs),
- receiverName + "-" + n)
+ system.actorOf(
+ receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs),
+ receiverName + "-" + n)
}
enterBarrier(receiverName + "-started")
enterBarrier(testName + "-done")
@@ -146,13 +151,15 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput
val idx = roles.indexOf(myself) - 1
val receiver = receivers(idx)
val plotProbe = TestProbe()
- val snd = system.actorOf(senderProps(receiver,
- receivers,
- testSettings,
- plotProbe.ref,
- printTaskRunnerMetrics = idx == 0,
- resultReporter),
- testName + "-snd" + idx)
+ val snd = system.actorOf(
+ senderProps(
+ receiver,
+ receivers,
+ testSettings,
+ plotProbe.ref,
+ printTaskRunnerMetrics = idx == 0,
+ resultReporter),
+ testName + "-snd" + idx)
val terminationProbe = TestProbe()
terminationProbe.watch(snd)
snd ! Run
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala
index 4e4bc3f6c5..d106641fe5 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala
@@ -88,30 +88,34 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp
// each sender may have 3 bursts in flight
val burstSize = 3000 / senderReceiverPairs / 3
val scenarios = List(
- TestSettings(testName = "warmup",
- totalMessages = adjustedTotalMessages(20000),
- burstSize = burstSize,
- payloadSize = 100,
- senderReceiverPairs = senderReceiverPairs,
- realMessage),
- TestSettings(testName = "size-100",
- totalMessages = adjustedTotalMessages(50000),
- burstSize = burstSize,
- payloadSize = 100,
- senderReceiverPairs = senderReceiverPairs,
- realMessage),
- TestSettings(testName = "size-1k",
- totalMessages = adjustedTotalMessages(10000),
- burstSize = burstSize,
- payloadSize = 1000,
- senderReceiverPairs = senderReceiverPairs,
- realMessage),
- TestSettings(testName = "size-10k",
- totalMessages = adjustedTotalMessages(2000),
- burstSize = burstSize,
- payloadSize = 10000,
- senderReceiverPairs = senderReceiverPairs,
- realMessage))
+ TestSettings(
+ testName = "warmup",
+ totalMessages = adjustedTotalMessages(20000),
+ burstSize = burstSize,
+ payloadSize = 100,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage),
+ TestSettings(
+ testName = "size-100",
+ totalMessages = adjustedTotalMessages(50000),
+ burstSize = burstSize,
+ payloadSize = 100,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage),
+ TestSettings(
+ testName = "size-1k",
+ totalMessages = adjustedTotalMessages(10000),
+ burstSize = burstSize,
+ payloadSize = 1000,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage),
+ TestSettings(
+ testName = "size-10k",
+ totalMessages = adjustedTotalMessages(2000),
+ burstSize = burstSize,
+ payloadSize = 10000,
+ senderReceiverPairs = senderReceiverPairs,
+ realMessage))
def test(testSettings: TestSettings, resultReporter: BenchmarkFileReporter): Unit = {
import testSettings._
@@ -123,8 +127,9 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp
runOn(targetNodes: _*) {
val rep = reporter(testName)
- val receiver = system.actorOf(receiverProps(rep, payloadSize, printTaskRunnerMetrics = true, senderReceiverPairs),
- receiverName)
+ val receiver = system.actorOf(
+ receiverProps(rep, payloadSize, printTaskRunnerMetrics = true, senderReceiverPairs),
+ receiverName)
enterBarrier(receiverName + "-started")
enterBarrier(testName + "-done")
receiver ! PoisonPill
@@ -138,13 +143,15 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp
val senders = for ((target, i) <- targetNodes.zipWithIndex) yield {
val receiver = receivers(i)
val plotProbe = TestProbe()
- val snd = system.actorOf(senderProps(receiver,
- receivers,
- testSettings,
- plotProbe.ref,
- printTaskRunnerMetrics = i == 0,
- resultReporter),
- testName + "-snd" + (i + 1))
+ val snd = system.actorOf(
+ senderProps(
+ receiver,
+ receivers,
+ testSettings,
+ plotProbe.ref,
+ printTaskRunnerMetrics = i == 0,
+ resultReporter),
+ testName + "-snd" + (i + 1))
val terminationProbe = TestProbe()
terminationProbe.watch(snd)
snd ! Run
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala
index 5a6fc2dcaf..51e29c055f 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala
@@ -55,9 +55,10 @@ abstract class HandshakeRestartReceiverSpec
super.afterAll()
}
- def identifyWithUid(rootPath: ActorPath,
- actorName: String,
- timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = {
+ def identifyWithUid(
+ rootPath: ActorPath,
+ actorName: String,
+ timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = {
within(timeout) {
system.actorSelection(rootPath / "user" / actorName) ! "identify"
expectMsgType[(Long, ActorRef)]
@@ -106,8 +107,9 @@ abstract class HandshakeRestartReceiverSpec
Await.result(system.whenTerminated, 10.seconds)
- val freshSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val freshSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.artery.canonical.port = ${address.port.get}
""").withFallback(system.settings.config))
freshSystem.actorOf(Props[Subject], "subject2")
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala
index 028a5a938d..474a49d051 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala
@@ -82,22 +82,24 @@ object LatencySpec extends MultiNodeConfig {
}
}
- def receiverProps(reporter: RateReporter,
- settings: TestSettings,
- totalMessages: Int,
- sendTimes: AtomicLongArray,
- histogram: Histogram,
- plotsRef: ActorRef,
- BenchmarkFileReporter: BenchmarkFileReporter): Props =
+ def receiverProps(
+ reporter: RateReporter,
+ settings: TestSettings,
+ totalMessages: Int,
+ sendTimes: AtomicLongArray,
+ histogram: Histogram,
+ plotsRef: ActorRef,
+ BenchmarkFileReporter: BenchmarkFileReporter): Props =
Props(new Receiver(reporter, settings, totalMessages, sendTimes, histogram, plotsRef, BenchmarkFileReporter))
- class Receiver(reporter: RateReporter,
- settings: TestSettings,
- totalMessages: Int,
- sendTimes: AtomicLongArray,
- histogram: Histogram,
- plotsRef: ActorRef,
- BenchmarkFileReporter: BenchmarkFileReporter)
+ class Receiver(
+ reporter: RateReporter,
+ settings: TestSettings,
+ totalMessages: Int,
+ sendTimes: AtomicLongArray,
+ histogram: Histogram,
+ plotsRef: ActorRef,
+ BenchmarkFileReporter: BenchmarkFileReporter)
extends Actor {
import settings._
@@ -138,11 +140,12 @@ object LatencySpec extends MultiNodeConfig {
}
}
- def printTotal(testName: String,
- payloadSize: Long,
- histogram: Histogram,
- totalDurationNanos: Long,
- reporter: BenchmarkFileReporter): Unit = {
+ def printTotal(
+ testName: String,
+ payloadSize: Long,
+ histogram: Histogram,
+ totalDurationNanos: Long,
+ reporter: BenchmarkFileReporter): Unit = {
def percentile(p: Double): Double = histogram.getValueAtPercentile(p) / 1000.0
val throughput = 1000.0 * histogram.getTotalCount / math.max(1, totalDurationNanos.nanos.toMillis)
@@ -157,18 +160,20 @@ object LatencySpec extends MultiNodeConfig {
taskRunnerMetrics.printHistograms()
- val plots = LatencyPlots(PlotResult().add(testName, percentile(50.0)),
- PlotResult().add(testName, percentile(90.0)),
- PlotResult().add(testName, percentile(99.0)))
+ val plots = LatencyPlots(
+ PlotResult().add(testName, percentile(50.0)),
+ PlotResult().add(testName, percentile(90.0)),
+ PlotResult().add(testName, percentile(99.0)))
plotsRef ! plots
}
}
- final case class TestSettings(testName: String,
- messageRate: Int, // msg/s
- payloadSize: Int,
- repeat: Int,
- realMessage: Boolean)
+ final case class TestSettings(
+ testName: String,
+ messageRate: Int, // msg/s
+ payloadSize: Int,
+ repeat: Int,
+ realMessage: Boolean)
}
@@ -214,31 +219,36 @@ abstract class LatencySpec extends RemotingMultiNodeSpec(LatencySpec) {
val scenarios = List(
TestSettings(testName = "warmup", messageRate = 10000, payloadSize = 100, repeat = repeatCount, realMessage),
- TestSettings(testName = "rate-100-size-100",
- messageRate = 100,
- payloadSize = 100,
- repeat = repeatCount,
- realMessage),
- TestSettings(testName = "rate-1000-size-100",
- messageRate = 1000,
- payloadSize = 100,
- repeat = repeatCount,
- realMessage),
- TestSettings(testName = "rate-10000-size-100",
- messageRate = 10000,
- payloadSize = 100,
- repeat = repeatCount,
- realMessage),
- TestSettings(testName = "rate-20000-size-100",
- messageRate = 20000,
- payloadSize = 100,
- repeat = repeatCount,
- realMessage),
- TestSettings(testName = "rate-1000-size-1k",
- messageRate = 1000,
- payloadSize = 1000,
- repeat = repeatCount,
- realMessage))
+ TestSettings(
+ testName = "rate-100-size-100",
+ messageRate = 100,
+ payloadSize = 100,
+ repeat = repeatCount,
+ realMessage),
+ TestSettings(
+ testName = "rate-1000-size-100",
+ messageRate = 1000,
+ payloadSize = 100,
+ repeat = repeatCount,
+ realMessage),
+ TestSettings(
+ testName = "rate-10000-size-100",
+ messageRate = 10000,
+ payloadSize = 100,
+ repeat = repeatCount,
+ realMessage),
+ TestSettings(
+ testName = "rate-20000-size-100",
+ messageRate = 20000,
+ payloadSize = 100,
+ repeat = repeatCount,
+ realMessage),
+ TestSettings(
+ testName = "rate-1000-size-1k",
+ messageRate = 1000,
+ payloadSize = 1000,
+ repeat = repeatCount,
+ realMessage))
def test(testSettings: TestSettings, BenchmarkFileReporter: BenchmarkFileReporter): Unit = {
import testSettings._
@@ -290,12 +300,13 @@ abstract class LatencySpec extends RemotingMultiNodeSpec(LatencySpec) {
val msg =
if (testSettings.realMessage)
- TestMessage(id = i,
- name = "abc",
- status = i % 2 == 0,
- description = "ABC",
- payload = payload,
- items = Vector(TestMessage.Item(1, "A"), TestMessage.Item(2, "B")))
+ TestMessage(
+ id = i,
+ name = "abc",
+ status = i % 2 == 0,
+ description = "ABC",
+ payload = payload,
+ items = Vector(TestMessage.Item(1, "A"), TestMessage.Item(2, "B")))
else payload
echo.tell(payload, receiver)
@@ -315,9 +326,10 @@ abstract class LatencySpec extends RemotingMultiNodeSpec(LatencySpec) {
val p = plotProbe.expectMsgType[LatencyPlots]
// only use the last repeat for the plots
if (n == repeat) {
- plots = plots.copy(plot50 = plots.plot50.addAll(p.plot50),
- plot90 = plots.plot90.addAll(p.plot90),
- plot99 = plots.plot99.addAll(p.plot99))
+ plots = plots.copy(
+ plot50 = plots.plot50.addAll(p.plot50),
+ plot90 = plots.plot90.addAll(p.plot90),
+ plot99 = plots.plot99.addAll(p.plot99))
}
}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala
index 677d614d0f..ae44528b88 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala
@@ -150,20 +150,22 @@ object MaxThroughputSpec extends MultiNodeConfig {
}
}
- def senderProps(mainTarget: Target,
- targets: Array[Target],
- testSettings: TestSettings,
- plotRef: ActorRef,
- printTaskRunnerMetrics: Boolean,
- reporter: BenchmarkFileReporter): Props =
+ def senderProps(
+ mainTarget: Target,
+ targets: Array[Target],
+ testSettings: TestSettings,
+ plotRef: ActorRef,
+ printTaskRunnerMetrics: Boolean,
+ reporter: BenchmarkFileReporter): Props =
Props(new Sender(mainTarget, targets, testSettings, plotRef, printTaskRunnerMetrics, reporter))
- class Sender(target: Target,
- targets: Array[Target],
- testSettings: TestSettings,
- plotRef: ActorRef,
- printTaskRunnerMetrics: Boolean,
- reporter: BenchmarkFileReporter)
+ class Sender(
+ target: Target,
+ targets: Array[Target],
+ testSettings: TestSettings,
+ plotRef: ActorRef,
+ printTaskRunnerMetrics: Boolean,
+ reporter: BenchmarkFileReporter)
extends Actor {
val numTargets = targets.size
@@ -283,12 +285,13 @@ object MaxThroughputSpec extends MultiNodeConfig {
while (i < batchSize) {
val msg0 =
if (realMessage)
- TestMessage(id = totalMessages - remaining + i,
- name = "abc",
- status = i % 2 == 0,
- description = "ABC",
- payload = payload,
- items = Vector(TestMessage.Item(1, "A"), TestMessage.Item(2, "B")))
+ TestMessage(
+ id = totalMessages - remaining + i,
+ name = "abc",
+ status = i % 2 == 0,
+ description = "ABC",
+ payload = payload,
+ items = Vector(TestMessage.Item(1, "A"), TestMessage.Item(2, "B")))
else payload
val msg1 = if (warmup) Warmup(msg0) else msg0
@@ -313,12 +316,13 @@ object MaxThroughputSpec extends MultiNodeConfig {
}
}
- final case class TestSettings(testName: String,
- totalMessages: Long,
- burstSize: Int,
- payloadSize: Int,
- senderReceiverPairs: Int,
- realMessage: Boolean) {
+ final case class TestSettings(
+ testName: String,
+ totalMessages: Long,
+ burstSize: Int,
+ payloadSize: Int,
+ senderReceiverPairs: Int,
+ realMessage: Boolean) {
// data based on measurement
def totalSize(system: ActorSystem) =
payloadSize + (if (RARP(system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) 38 else 110)
@@ -407,36 +411,41 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec
}
val scenarios = List(
- TestSettings(testName = "warmup",
- totalMessages = adjustedTotalMessages(20000),
- burstSize = 1000,
- payloadSize = 100,
- senderReceiverPairs = 1,
- realMessage),
- TestSettings(testName = "1-to-1",
- totalMessages = adjustedTotalMessages(50000),
- burstSize = 1000,
- payloadSize = 100,
- senderReceiverPairs = 1,
- realMessage),
- TestSettings(testName = "1-to-1-size-1k",
- totalMessages = adjustedTotalMessages(20000),
- burstSize = 1000,
- payloadSize = 1000,
- senderReceiverPairs = 1,
- realMessage),
- TestSettings(testName = "1-to-1-size-10k",
- totalMessages = adjustedTotalMessages(5000),
- burstSize = 1000,
- payloadSize = 10000,
- senderReceiverPairs = 1,
- realMessage),
- TestSettings(testName = "5-to-5",
- totalMessages = adjustedTotalMessages(20000),
- burstSize = 200, // don't exceed the send queue capacity 200*5*3=3000
- payloadSize = 100,
- senderReceiverPairs = 5,
- realMessage))
+ TestSettings(
+ testName = "warmup",
+ totalMessages = adjustedTotalMessages(20000),
+ burstSize = 1000,
+ payloadSize = 100,
+ senderReceiverPairs = 1,
+ realMessage),
+ TestSettings(
+ testName = "1-to-1",
+ totalMessages = adjustedTotalMessages(50000),
+ burstSize = 1000,
+ payloadSize = 100,
+ senderReceiverPairs = 1,
+ realMessage),
+ TestSettings(
+ testName = "1-to-1-size-1k",
+ totalMessages = adjustedTotalMessages(20000),
+ burstSize = 1000,
+ payloadSize = 1000,
+ senderReceiverPairs = 1,
+ realMessage),
+ TestSettings(
+ testName = "1-to-1-size-10k",
+ totalMessages = adjustedTotalMessages(5000),
+ burstSize = 1000,
+ payloadSize = 10000,
+ senderReceiverPairs = 1,
+ realMessage),
+ TestSettings(
+ testName = "5-to-5",
+ totalMessages = adjustedTotalMessages(20000),
+ burstSize = 200, // don't exceed the send queue capacity 200*5*3=3000
+ payloadSize = 100,
+ senderReceiverPairs = 5,
+ realMessage))
def test(testSettings: TestSettings, resultReporter: BenchmarkFileReporter): Unit = {
import testSettings._
@@ -447,8 +456,9 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec
runOn(second) {
val rep = reporter(testName)
val receivers = (1 to senderReceiverPairs).map { n =>
- system.actorOf(receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs),
- receiverName + n)
+ system.actorOf(
+ receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs),
+ receiverName + n)
}
enterBarrier(receiverName + "-started")
enterBarrier(testName + "-done")
@@ -463,13 +473,15 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec
val senders = for (n <- 1 to senderReceiverPairs) yield {
val receiver = receivers(n - 1)
val plotProbe = TestProbe()
- val snd = system.actorOf(senderProps(receiver,
- receivers,
- testSettings,
- plotProbe.ref,
- printTaskRunnerMetrics = n == 1,
- resultReporter),
- testName + "-snd" + n)
+ val snd = system.actorOf(
+ senderProps(
+ receiver,
+ receivers,
+ testSettings,
+ plotProbe.ref,
+ printTaskRunnerMetrics = n == 1,
+ resultReporter),
+ testName + "-snd" + n)
val terminationProbe = TestProbe()
terminationProbe.watch(snd)
snd ! Run
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala
index 8251137c90..20064731f7 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala
@@ -21,6 +21,7 @@ final case class PlotResult(values: Vector[(String, Number)] = Vector.empty) {
}
-final case class LatencyPlots(plot50: PlotResult = PlotResult(),
- plot90: PlotResult = PlotResult(),
- plot99: PlotResult = PlotResult())
+final case class LatencyPlots(
+ plot50: PlotResult = PlotResult(),
+ plot90: PlotResult = PlotResult(),
+ plot99: PlotResult = PlotResult())
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala
index 26ec6e6fd4..01e3631ef9 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala
@@ -44,9 +44,10 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo
override def initialParticipants = 2
- def identifyWithUid(role: RoleName,
- actorName: String,
- timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = {
+ def identifyWithUid(
+ role: RoleName,
+ actorName: String,
+ timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = {
within(timeout) {
system.actorSelection(node(role) / "user" / actorName) ! "identify"
expectMsgType[(Long, ActorRef)]
@@ -108,8 +109,9 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo
Await.result(system.whenTerminated, 10.seconds)
- val freshSystem = ActorSystem(system.name,
- ConfigFactory.parseString(s"""
+ val freshSystem = ActorSystem(
+ system.name,
+ ConfigFactory.parseString(s"""
akka.remote.artery.canonical.port = ${address.port.get}
""").withFallback(system.settings.config))
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala
index 93835d09b2..2da9f6c550 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala
@@ -13,12 +13,13 @@ object TestMessage {
final case class Item(id: Long, name: String)
}
-final case class TestMessage(id: Long,
- name: String,
- status: Boolean,
- description: String,
- payload: Array[Byte],
- items: Vector[TestMessage.Item])
+final case class TestMessage(
+ id: Long,
+ name: String,
+ status: Boolean,
+ description: String,
+ payload: Array[Byte],
+ items: Vector[TestMessage.Item])
class TestMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest {
@@ -53,11 +54,12 @@ class TestMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
TestMessage.Item(item.getId, item.getName)
}.toVector
- TestMessage(id = protoMsg.getId,
- name = protoMsg.getName,
- description = protoMsg.getDescription,
- status = protoMsg.getStatus,
- payload = protoMsg.getPayload.toByteArray(),
- items = items)
+ TestMessage(
+ id = protoMsg.getId,
+ name = protoMsg.getName,
+ description = protoMsg.getDescription,
+ status = protoMsg.getStatus,
+ payload = protoMsg.getPayload.toByteArray(),
+ items = items)
}
}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala
index b017597bb8..39f004c9c5 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala
@@ -8,10 +8,11 @@ import java.util.concurrent.TimeUnit.SECONDS
class TestRateReporter(name: String)
extends RateReporter(SECONDS.toNanos(1), new RateReporter.Reporter {
- override def onReport(messagesPerSec: Double,
- bytesPerSec: Double,
- totalMessages: Long,
- totalBytes: Long): Unit = {
+ override def onReport(
+ messagesPerSec: Double,
+ bytesPerSec: Double,
+ totalMessages: Long,
+ totalBytes: Long): Unit = {
println(
name +
f": ${messagesPerSec}%,.0f msgs/sec, ${bytesPerSec}%,.0f bytes/sec, " +
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala
index 0f5c120065..355b26c6f9 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala
@@ -62,10 +62,11 @@ object AeronStreamLatencySpec extends MultiNodeConfig {
}
""")))
- final case class TestSettings(testName: String,
- messageRate: Int, // msg/s
- payloadSize: Int,
- repeat: Int)
+ final case class TestSettings(
+ testName: String,
+ messageRate: Int, // msg/s
+ payloadSize: Int,
+ repeat: Int)
}
@@ -141,11 +142,12 @@ abstract class AeronStreamLatencySpec
super.afterAll()
}
- def printTotal(testName: String,
- payloadSize: Long,
- histogram: Histogram,
- totalDurationNanos: Long,
- lastRepeat: Boolean): Unit = {
+ def printTotal(
+ testName: String,
+ payloadSize: Long,
+ histogram: Histogram,
+ totalDurationNanos: Long,
+ lastRepeat: Boolean): Unit = {
def percentile(p: Double): Double = histogram.getValueAtPercentile(p) / 1000.0
val throughput = 1000.0 * histogram.getTotalCount / totalDurationNanos.nanos.toMillis
@@ -160,9 +162,10 @@ abstract class AeronStreamLatencySpec
// only use the last repeat for the plots
if (lastRepeat) {
- plots = plots.copy(plot50 = plots.plot50.add(testName, percentile(50.0)),
- plot90 = plots.plot90.add(testName, percentile(90.0)),
- plot99 = plots.plot99.add(testName, percentile(99.0)))
+ plots = plots.copy(
+ plot50 = plots.plot50.add(testName, percentile(50.0)),
+ plot90 = plots.plot90.add(testName, percentile(90.0)),
+ plot99 = plots.plot99.add(testName, percentile(99.0)))
}
}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala
index 5855acfc4a..910a5dd79b 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala
@@ -134,8 +134,9 @@ class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig)
runOn(fourth) {
enterBarrier("start")
val actor =
- system.actorOf(RoundRobinPool(nrOfInstances = 1, resizer = Some(new TestResizer)).props(Props[SomeActor]),
- "service-hello2")
+ system.actorOf(
+ RoundRobinPool(nrOfInstances = 1, resizer = Some(new TestResizer)).props(Props[SomeActor]),
+ "service-hello2")
actor.isInstanceOf[RoutedActorRef] should ===(true)
actor ! GetRoutees
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
index feceb3c5e4..6c012aff63 100644
--- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
+++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
@@ -195,13 +195,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender {
val msg = expectMsgType[Failed]
msg match {
case Failed(barrier, thr: BarrierEmpty)
- if (thr == BarrierEmpty(Data(Set(), "", Nil, thr.data.deadline),
- "cannot remove RoleName(a): no client to remove")) =>
+ if (thr == BarrierEmpty(
+ Data(Set(), "", Nil, thr.data.deadline),
+ "cannot remove RoleName(a): no client to remove")) =>
case x =>
- fail(
- "Expected " + Failed(barrier,
- BarrierEmpty(Data(Set(), "", Nil, null),
- "cannot remove RoleName(a): no client to remove")) + " but got " + x)
+ fail("Expected " + Failed(
+ barrier,
+ BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove")) + " but got " + x)
}
barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
a.send(barrier, EnterBarrier("bar9", None))
@@ -580,10 +580,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender {
probes.foreach(_.msgAvailable should ===(false))
}
- private def data(clients: Set[Controller.NodeInfo],
- barrier: String,
- arrived: List[ActorRef],
- previous: Data): Data = {
+ private def data(
+ clients: Set[Controller.NodeInfo],
+ barrier: String,
+ arrived: List[ActorRef],
+ previous: Data): Data = {
Data(clients, barrier, arrived, previous.deadline)
}
}
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala
index 085c8755ce..904d4982ba 100644
--- a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala
+++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala
@@ -45,8 +45,9 @@ object LogRoleReplace extends ClipboardOwner {
val replacer = new LogRoleReplace
if (args.length == 0) {
- replacer.process(new BufferedReader(new InputStreamReader(System.in)),
- new PrintWriter(new OutputStreamWriter(System.out)))
+ replacer.process(
+ new BufferedReader(new InputStreamReader(System.in)),
+ new PrintWriter(new OutputStreamWriter(System.out)))
} else if (args(0) == "clipboard") {
val clipboard = Toolkit.getDefaultToolkit.getSystemClipboard
diff --git a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala
index 6076431fe4..a036ada1cb 100644
--- a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala
+++ b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala
@@ -89,10 +89,11 @@ class ResendUnfulfillableException
* @param maxSeq The maximum sequence number that has been stored in this buffer. Messages having lower sequence number
* will be not stored but rejected with [[java.lang.IllegalArgumentException]]
*/
-final case class AckedSendBuffer[T <: HasSequenceNumber](capacity: Int,
- nonAcked: IndexedSeq[T] = Vector.empty[T],
- nacked: IndexedSeq[T] = Vector.empty[T],
- maxSeq: SeqNo = SeqNo(-1)) {
+final case class AckedSendBuffer[T <: HasSequenceNumber](
+ capacity: Int,
+ nonAcked: IndexedSeq[T] = Vector.empty[T],
+ nacked: IndexedSeq[T] = Vector.empty[T],
+ maxSeq: SeqNo = SeqNo(-1)) {
/**
* Processes an incoming acknowledgement and returns a new buffer with only unacknowledged elements remaining.
@@ -156,8 +157,9 @@ final case class AckedReceiveBuffer[T <: HasSequenceNumber](
* @return The updated buffer containing the message.
*/
def receive(arrivedMsg: T): AckedReceiveBuffer[T] = {
- this.copy(cumulativeAck = max(arrivedMsg.seq, cumulativeAck),
- buf = if (arrivedMsg.seq > lastDelivered && !buf.contains(arrivedMsg)) buf + arrivedMsg else buf)
+ this.copy(
+ cumulativeAck = max(arrivedMsg.seq, cumulativeAck),
+ buf = if (arrivedMsg.seq > lastDelivered && !buf.contains(arrivedMsg)) buf + arrivedMsg else buf)
}
/**
@@ -201,9 +203,10 @@ final case class AckedReceiveBuffer[T <: HasSequenceNumber](
*/
def mergeFrom(that: AckedReceiveBuffer[T]): AckedReceiveBuffer[T] = {
val mergedLastDelivered = max(this.lastDelivered, that.lastDelivered)
- this.copy(lastDelivered = mergedLastDelivered,
- cumulativeAck = max(this.cumulativeAck, that.cumulativeAck),
- buf = this.buf.union(that.buf).filter { _.seq > mergedLastDelivered })
+ this.copy(
+ lastDelivered = mergedLastDelivered,
+ cumulativeAck = max(this.cumulativeAck, that.cumulativeAck),
+ buf = this.buf.union(that.buf).filter { _.seq > mergedLastDelivered })
}
override def toString = buf.map { _.seq }.mkString("[", ", ", "]")
diff --git a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala
index 1513dc1798..1dfc8313e0 100644
--- a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala
+++ b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala
@@ -37,8 +37,9 @@ class DeadlineFailureDetector(val acceptableHeartbeatPause: FiniteDuration, val
* Expecting config properties named `acceptable-heartbeat-pause`.
*/
def this(config: Config, ev: EventStream) =
- this(acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"),
- heartbeatInterval = config.getMillisDuration("heartbeat-interval"))
+ this(
+ acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"),
+ heartbeatInterval = config.getMillisDuration("heartbeat-interval"))
require(acceptableHeartbeatPause >= Duration.Zero, "failure-detector.acceptable-heartbeat-pause must be >= 0 s")
require(heartbeatInterval > Duration.Zero, "failure-detector.heartbeat-interval must be > 0 s")
diff --git a/akka-remote/src/main/scala/akka/remote/Endpoint.scala b/akka-remote/src/main/scala/akka/remote/Endpoint.scala
index 973b477bfe..a718f3d715 100644
--- a/akka-remote/src/main/scala/akka/remote/Endpoint.scala
+++ b/akka-remote/src/main/scala/akka/remote/Endpoint.scala
@@ -42,26 +42,29 @@ import akka.util.OptionVal
* INTERNAL API
*/
private[remote] trait InboundMessageDispatcher {
- def dispatch(recipient: InternalActorRef,
- recipientAddress: Address,
- serializedMessage: SerializedMessage,
- senderOption: OptionVal[ActorRef]): Unit
+ def dispatch(
+ recipient: InternalActorRef,
+ recipientAddress: Address,
+ serializedMessage: SerializedMessage,
+ senderOption: OptionVal[ActorRef]): Unit
}
/**
* INTERNAL API
*/
-private[remote] class DefaultMessageDispatcher(private val system: ExtendedActorSystem,
- private val provider: RemoteActorRefProvider,
- private val log: MarkerLoggingAdapter)
+private[remote] class DefaultMessageDispatcher(
+ private val system: ExtendedActorSystem,
+ private val provider: RemoteActorRefProvider,
+ private val log: MarkerLoggingAdapter)
extends InboundMessageDispatcher {
private val remoteDaemon = provider.remoteDaemon
- override def dispatch(recipient: InternalActorRef,
- recipientAddress: Address,
- serializedMessage: SerializedMessage,
- senderOption: OptionVal[ActorRef]): Unit = {
+ override def dispatch(
+ recipient: InternalActorRef,
+ recipientAddress: Address,
+ serializedMessage: SerializedMessage,
+ senderOption: OptionVal[ActorRef]): Unit = {
import provider.remoteSettings._
@@ -72,11 +75,12 @@ private[remote] class DefaultMessageDispatcher(private val system: ExtendedActor
def logMessageReceived(messageType: String): Unit = {
if (LogReceive && log.isDebugEnabled)
- log.debug(s"received $messageType RemoteMessage: [{}] to [{}]<+[{}] from [{}]",
- payload,
- recipient,
- originalReceiver,
- sender)
+ log.debug(
+ s"received $messageType RemoteMessage: [{}] to [{}]<+[{}] from [{}]",
+ payload,
+ recipient,
+ originalReceiver,
+ sender)
}
recipient match {
@@ -94,17 +98,19 @@ private[remote] class DefaultMessageDispatcher(private val system: ExtendedActor
case sel: ActorSelectionMessage =>
if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) ||
sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian))
- log.debug(LogMarker.Security,
- "operating in UntrustedMode, dropping inbound actor selection to [{}], " +
- "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration",
- sel.elements.mkString("/", "/", ""))
+ log.debug(
+ LogMarker.Security,
+ "operating in UntrustedMode, dropping inbound actor selection to [{}], " +
+ "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration",
+ sel.elements.mkString("/", "/", ""))
else
// run the receive logic for ActorSelectionMessage here to make sure it is not stuck on busy user actor
ActorSelection.deliverSelection(l, sender, sel)
case msg: PossiblyHarmful if UntrustedMode =>
- log.debug(LogMarker.Security,
- "operating in UntrustedMode, dropping inbound PossiblyHarmful message of type [{}]",
- msg.getClass.getName)
+ log.debug(
+ LogMarker.Security,
+ "operating in UntrustedMode, dropping inbound PossiblyHarmful message of type [{}]",
+ msg.getClass.getName)
case msg: SystemMessage => l.sendSystemMessage(msg)
case msg => l.!(msg)(sender)
}
@@ -115,18 +121,20 @@ private[remote] class DefaultMessageDispatcher(private val system: ExtendedActor
// if it was originally addressed to us but is in fact remote from our point of view (i.e. remote-deployed)
r.!(payload)(sender)
else
- log.error("dropping message [{}] for non-local recipient [{}] arriving at [{}] inbound addresses are [{}]",
- payloadClass,
- r,
- recipientAddress,
- provider.transport.addresses.mkString(", "))
+ log.error(
+ "dropping message [{}] for non-local recipient [{}] arriving at [{}] inbound addresses are [{}]",
+ payloadClass,
+ r,
+ recipientAddress,
+ provider.transport.addresses.mkString(", "))
case r =>
- log.error("dropping message [{}] for unknown recipient [{}] arriving at [{}] inbound addresses are [{}]",
- payloadClass,
- r,
- recipientAddress,
- provider.transport.addresses.mkString(", "))
+ log.error(
+ "dropping message [{}] for unknown recipient [{}] arriving at [{}] inbound addresses are [{}]",
+ payloadClass,
+ r,
+ recipientAddress,
+ provider.transport.addresses.mkString(", "))
}
}
@@ -160,10 +168,11 @@ private[remote] final case class ShutDownAssociation(localAddress: Address, remo
* INTERNAL API
*/
@SerialVersionUID(2L)
-private[remote] final case class InvalidAssociation(localAddress: Address,
- remoteAddress: Address,
- cause: Throwable,
- disassociationInfo: Option[DisassociateInfo] = None)
+private[remote] final case class InvalidAssociation(
+ localAddress: Address,
+ remoteAddress: Address,
+ cause: Throwable,
+ disassociationInfo: Option[DisassociateInfo] = None)
extends EndpointException("Invalid address: " + remoteAddress, cause)
with AssociationProblem
@@ -171,10 +180,11 @@ private[remote] final case class InvalidAssociation(localAddress: Address,
* INTERNAL API
*/
@SerialVersionUID(1L)
-private[remote] final case class HopelessAssociation(localAddress: Address,
- remoteAddress: Address,
- uid: Option[Int],
- cause: Throwable)
+private[remote] final case class HopelessAssociation(
+ localAddress: Address,
+ remoteAddress: Address,
+ uid: Option[Int],
+ cause: Throwable)
extends EndpointException("Catastrophic association error.")
with AssociationProblem
@@ -208,55 +218,60 @@ private[remote] object ReliableDeliverySupervisor {
case object Idle
case object TooLongIdle
- def props(handleOrActive: Option[AkkaProtocolHandle],
- localAddress: Address,
- remoteAddress: Address,
- refuseUid: Option[Int],
- transport: AkkaProtocolTransport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
- Props(classOf[ReliableDeliverySupervisor],
- handleOrActive,
- localAddress,
- remoteAddress,
- refuseUid,
- transport,
- settings,
- codec,
- receiveBuffers)
+ def props(
+ handleOrActive: Option[AkkaProtocolHandle],
+ localAddress: Address,
+ remoteAddress: Address,
+ refuseUid: Option[Int],
+ transport: AkkaProtocolTransport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
+ Props(
+ classOf[ReliableDeliverySupervisor],
+ handleOrActive,
+ localAddress,
+ remoteAddress,
+ refuseUid,
+ transport,
+ settings,
+ codec,
+ receiveBuffers)
}
/**
* INTERNAL API
*/
-private[remote] class ReliableDeliverySupervisor(handleOrActive: Option[AkkaProtocolHandle],
- val localAddress: Address,
- val remoteAddress: Address,
- val refuseUid: Option[Int],
- val transport: AkkaProtocolTransport,
- val settings: RemoteSettings,
- val codec: AkkaPduCodec,
- val receiveBuffers: ConcurrentHashMap[Link, ResendState])
+private[remote] class ReliableDeliverySupervisor(
+ handleOrActive: Option[AkkaProtocolHandle],
+ val localAddress: Address,
+ val remoteAddress: Address,
+ val refuseUid: Option[Int],
+ val transport: AkkaProtocolTransport,
+ val settings: RemoteSettings,
+ val codec: AkkaPduCodec,
+ val receiveBuffers: ConcurrentHashMap[Link, ResendState])
extends Actor
with ActorLogging {
import ReliableDeliverySupervisor._
import context.dispatcher
- val autoResendTimer = context.system.scheduler.schedule(settings.SysResendTimeout,
- settings.SysResendTimeout,
- self,
- AttemptSysMsgRedelivery)
+ val autoResendTimer = context.system.scheduler.schedule(
+ settings.SysResendTimeout,
+ settings.SysResendTimeout,
+ self,
+ AttemptSysMsgRedelivery)
override val supervisorStrategy = OneForOneStrategy(loggingEnabled = false) {
case _: AssociationProblem => Escalate
case NonFatal(e) =>
val causedBy = if (e.getCause == null) "" else s"Caused by: [${e.getCause.getMessage}]"
- log.warning("Association with remote system [{}] has failed, address is now gated for [{}] ms. Reason: [{}] {}",
- remoteAddress,
- settings.RetryGateClosedFor.toMillis,
- e.getMessage,
- causedBy)
+ log.warning(
+ "Association with remote system [{}] has failed, address is now gated for [{}] ms. Reason: [{}] {}",
+ remoteAddress,
+ settings.RetryGateClosedFor.toMillis,
+ e.getMessage,
+ causedBy)
uidConfirmed = false // Need confirmation of UID again
if ((resendBuffer.nacked.nonEmpty || resendBuffer.nonAcked.nonEmpty) && bailoutAt.isEmpty)
bailoutAt = Some(Deadline.now + settings.InitialSysMsgDeliveryTimeout)
@@ -340,13 +355,14 @@ private[remote] class ReliableDeliverySupervisor(handleOrActive: Option[AkkaProt
try resendBuffer = resendBuffer.acknowledge(ack)
catch {
case NonFatal(e) =>
- throw new HopelessAssociation(localAddress,
- remoteAddress,
- uid,
- new IllegalStateException(
- s"Error encountered while processing system message " +
- s"acknowledgement buffer: $resendBuffer ack: $ack",
- e))
+ throw new HopelessAssociation(
+ localAddress,
+ remoteAddress,
+ uid,
+ new IllegalStateException(
+ s"Error encountered while processing system message " +
+ s"acknowledgement buffer: $resendBuffer ack: $ack",
+ e))
}
resendNacked()
@@ -490,15 +506,16 @@ private[remote] class ReliableDeliverySupervisor(handleOrActive: Option[AkkaProt
context.watch(
context.actorOf(
RARP(context.system)
- .configureDispatcher(EndpointWriter.props(handleOrActive = currentHandle,
- localAddress = localAddress,
- remoteAddress = remoteAddress,
- refuseUid,
- transport = transport,
- settings = settings,
- AkkaPduProtobufCodec,
- receiveBuffers = receiveBuffers,
- reliableDeliverySupervisor = Some(self)))
+ .configureDispatcher(EndpointWriter.props(
+ handleOrActive = currentHandle,
+ localAddress = localAddress,
+ remoteAddress = remoteAddress,
+ refuseUid,
+ transport = transport,
+ settings = settings,
+ AkkaPduProtobufCodec,
+ receiveBuffers = receiveBuffers,
+ reliableDeliverySupervisor = Some(self)))
.withDeploy(Deploy.local),
"endpointWriter"))
}
@@ -507,11 +524,12 @@ private[remote] class ReliableDeliverySupervisor(handleOrActive: Option[AkkaProt
/**
* INTERNAL API
*/
-private[remote] abstract class EndpointActor(val localAddress: Address,
- val remoteAddress: Address,
- val transport: Transport,
- val settings: RemoteSettings,
- val codec: AkkaPduCodec)
+private[remote] abstract class EndpointActor(
+ val localAddress: Address,
+ val remoteAddress: Address,
+ val transport: Transport,
+ val settings: RemoteSettings,
+ val codec: AkkaPduCodec)
extends Actor
with ActorLogging {
@@ -534,25 +552,27 @@ private[remote] abstract class EndpointActor(val localAddress: Address,
*/
private[remote] object EndpointWriter {
- def props(handleOrActive: Option[AkkaProtocolHandle],
- localAddress: Address,
- remoteAddress: Address,
- refuseUid: Option[Int],
- transport: AkkaProtocolTransport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- receiveBuffers: ConcurrentHashMap[Link, ResendState],
- reliableDeliverySupervisor: Option[ActorRef]): Props =
- Props(classOf[EndpointWriter],
- handleOrActive,
- localAddress,
- remoteAddress,
- refuseUid,
- transport,
- settings,
- codec,
- receiveBuffers,
- reliableDeliverySupervisor)
+ def props(
+ handleOrActive: Option[AkkaProtocolHandle],
+ localAddress: Address,
+ remoteAddress: Address,
+ refuseUid: Option[Int],
+ transport: AkkaProtocolTransport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ receiveBuffers: ConcurrentHashMap[Link, ResendState],
+ reliableDeliverySupervisor: Option[ActorRef]): Props =
+ Props(
+ classOf[EndpointWriter],
+ handleOrActive,
+ localAddress,
+ remoteAddress,
+ refuseUid,
+ transport,
+ settings,
+ codec,
+ receiveBuffers,
+ reliableDeliverySupervisor)
/**
* This message signals that the current association maintained by the local EndpointWriter and EndpointReader is
@@ -586,15 +606,16 @@ private[remote] object EndpointWriter {
/**
* INTERNAL API
*/
-private[remote] class EndpointWriter(handleOrActive: Option[AkkaProtocolHandle],
- localAddress: Address,
- remoteAddress: Address,
- refuseUid: Option[Int],
- transport: AkkaProtocolTransport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- val receiveBuffers: ConcurrentHashMap[Link, ResendState],
- val reliableDeliverySupervisor: Option[ActorRef])
+private[remote] class EndpointWriter(
+ handleOrActive: Option[AkkaProtocolHandle],
+ localAddress: Address,
+ remoteAddress: Address,
+ refuseUid: Option[Int],
+ transport: AkkaProtocolTransport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ val receiveBuffers: ConcurrentHashMap[Link, ResendState],
+ val reliableDeliverySupervisor: Option[ActorRef])
extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {
import EndpointWriter._
@@ -673,8 +694,9 @@ private[remote] class EndpointWriter(handleOrActive: Option[AkkaProtocolHandle],
case Status.Failure(e: InvalidAssociationException) =>
publishAndThrow(new InvalidAssociation(localAddress, remoteAddress, e), Logging.WarningLevel)
case Status.Failure(e) =>
- publishAndThrow(new EndpointAssociationException(s"Association failed with [$remoteAddress]", e),
- Logging.DebugLevel)
+ publishAndThrow(
+ new EndpointAssociationException(s"Association failed with [$remoteAddress]", e),
+ Logging.DebugLevel)
case Handle(inboundHandle) =>
// Assert handle == None?
context.parent ! ReliableDeliverySupervisor.GotUid(inboundHandle.handshakeInfo.uid, remoteAddress)
@@ -784,10 +806,11 @@ private[remote] class EndpointWriter(handleOrActive: Option[AkkaProtocolHandle],
if (size > settings.LogBufferSizeExceeding) {
val now = System.nanoTime()
if (now - largeBufferLogTimestamp >= LogBufferSizeInterval) {
- log.warning("[{}] buffered messages in EndpointWriter for [{}]. " +
- "You should probably implement flow control to avoid flooding the remote connection.",
- size,
- remoteAddress)
+ log.warning(
+ "[{}] buffered messages in EndpointWriter for [{}]. " +
+ "You should probably implement flow control to avoid flooding the remote connection.",
+ size,
+ remoteAddress)
largeBufferLogTimestamp = now
}
}
@@ -849,12 +872,13 @@ private[remote] class EndpointWriter(handleOrActive: Option[AkkaProtocolHandle],
log.debug("sending message {}", msgLog)
}
- val pdu = codec.constructMessage(s.recipient.localAddressToUse,
- s.recipient,
- serializeMessage(s.message),
- s.senderOption,
- seqOption = s.seqOpt,
- ackOption = lastAck)
+ val pdu = codec.constructMessage(
+ s.recipient.localAddressToUse,
+ s.recipient,
+ serializeMessage(s.message),
+ s.senderOption,
+ seqOption = s.seqOpt,
+ ackOption = lastAck)
val pduSize = pdu.size
remoteMetrics.logPayloadBytes(s.message, pduSize)
@@ -965,16 +989,17 @@ private[remote] class EndpointWriter(handleOrActive: Option[AkkaProtocolHandle],
context.actorOf(
RARP(context.system)
.configureDispatcher(
- EndpointReader.props(localAddress,
- remoteAddress,
- transport,
- settings,
- codec,
- msgDispatch,
- inbound,
- handle.handshakeInfo.uid,
- reliableDeliverySupervisor,
- receiveBuffers))
+ EndpointReader.props(
+ localAddress,
+ remoteAddress,
+ transport,
+ settings,
+ codec,
+ msgDispatch,
+ inbound,
+ handle.handshakeInfo.uid,
+ reliableDeliverySupervisor,
+ receiveBuffers))
.withDeploy(Deploy.local),
"endpointReader-" + AddressUrlEncoder(remoteAddress) + "-" + readerId.next()))
handle.readHandlerPromise.success(ActorHandleEventListener(newReader))
@@ -997,43 +1022,46 @@ private[remote] class EndpointWriter(handleOrActive: Option[AkkaProtocolHandle],
*/
private[remote] object EndpointReader {
- def props(localAddress: Address,
- remoteAddress: Address,
- transport: Transport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- msgDispatch: InboundMessageDispatcher,
- inbound: Boolean,
- uid: Int,
- reliableDeliverySupervisor: Option[ActorRef],
- receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
- Props(classOf[EndpointReader],
- localAddress,
- remoteAddress,
- transport,
- settings,
- codec,
- msgDispatch,
- inbound,
- uid,
- reliableDeliverySupervisor,
- receiveBuffers)
+ def props(
+ localAddress: Address,
+ remoteAddress: Address,
+ transport: Transport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ msgDispatch: InboundMessageDispatcher,
+ inbound: Boolean,
+ uid: Int,
+ reliableDeliverySupervisor: Option[ActorRef],
+ receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
+ Props(
+ classOf[EndpointReader],
+ localAddress,
+ remoteAddress,
+ transport,
+ settings,
+ codec,
+ msgDispatch,
+ inbound,
+ uid,
+ reliableDeliverySupervisor,
+ receiveBuffers)
}
/**
* INTERNAL API
*/
-private[remote] class EndpointReader(localAddress: Address,
- remoteAddress: Address,
- transport: Transport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- msgDispatch: InboundMessageDispatcher,
- val inbound: Boolean,
- val uid: Int,
- val reliableDeliverySupervisor: Option[ActorRef],
- val receiveBuffers: ConcurrentHashMap[Link, ResendState])
+private[remote] class EndpointReader(
+ localAddress: Address,
+ remoteAddress: Address,
+ transport: Transport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ msgDispatch: InboundMessageDispatcher,
+ val inbound: Boolean,
+ val uid: Int,
+ val reliableDeliverySupervisor: Option[ActorRef],
+ val receiveBuffers: ConcurrentHashMap[Link, ResendState])
extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {
import EndpointWriter.{ OutboundAck, StopReading, StoppedReading }
@@ -1063,9 +1091,10 @@ private[remote] class EndpointReader(localAddress: Address,
if (expectedState eq null) {
if (receiveBuffers.putIfAbsent(key, ResendState(uid, ackedReceiveBuffer)) ne null)
updateSavedState(key, receiveBuffers.get(key))
- } else if (!receiveBuffers.replace(key,
- expectedState,
- merge(ResendState(uid, ackedReceiveBuffer), expectedState)))
+ } else if (!receiveBuffers.replace(
+ key,
+ expectedState,
+ merge(ResendState(uid, ackedReceiveBuffer), expectedState)))
updateSavedState(key, receiveBuffers.get(key))
}
@@ -1112,11 +1141,12 @@ private[remote] class EndpointReader(localAddress: Address,
private def logTransientSerializationError(msg: AkkaPduCodec.Message, error: Exception): Unit = {
val sm = msg.serializedMessage
- log.warning("Serializer not defined for message with serializer id [{}] and manifest [{}]. " +
- "Transient association error (association remains live). {}",
- sm.getSerializerId,
- if (sm.hasMessageManifest) sm.getMessageManifest.toStringUtf8 else "",
- error.getMessage)
+ log.warning(
+ "Serializer not defined for message with serializer id [{}] and manifest [{}]. " +
+ "Transient association error (association remains live). {}",
+ sm.getSerializerId,
+ if (sm.hasMessageManifest) sm.getMessageManifest.toStringUtf8 else "",
+ error.getMessage)
}
def notReading: Receive = {
@@ -1130,11 +1160,12 @@ private[remote] class EndpointReader(localAddress: Address,
for (ack <- ackOption; reliableDelivery <- reliableDeliverySupervisor) reliableDelivery ! ack
if (log.isWarningEnabled)
- log.warning("Discarding inbound message to [{}] in read-only association to [{}]. " +
- "If this happens often you may consider using akka.remote.use-passive-connections=off " +
- "or use Artery TCP.",
- msgOption.map(_.recipient).getOrElse("unknown"),
- remoteAddress)
+ log.warning(
+ "Discarding inbound message to [{}] in read-only association to [{}]. " +
+ "If this happens often you may consider using akka.remote.use-passive-connections=off " +
+ "or use Artery TCP.",
+ msgOption.map(_.recipient).getOrElse("unknown"),
+ remoteAddress)
case InboundPayload(oversized) =>
log.error(
@@ -1155,12 +1186,13 @@ private[remote] class EndpointReader(localAddress: Address,
remoteAddress,
InvalidAssociationException("The remote system terminated the association because it is shutting down."))
case AssociationHandle.Quarantined =>
- throw InvalidAssociation(localAddress,
- remoteAddress,
- InvalidAssociationException(
- "The remote system has quarantined this system. No further associations " +
- "to the remote system are possible until this system is restarted."),
- Some(AssociationHandle.Quarantined))
+ throw InvalidAssociation(
+ localAddress,
+ remoteAddress,
+ InvalidAssociationException(
+ "The remote system has quarantined this system. No further associations " +
+ "to the remote system are possible until this system is restarted."),
+ Some(AssociationHandle.Quarantined))
}
private def deliverAndAck(): Unit = {
diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
index feff8ff414..6b4d9b3f43 100644
--- a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
+++ b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
@@ -68,8 +68,9 @@ private[akka] object FailureDetectorLoader {
system
.asInstanceOf[ExtendedActorSystem]
.dynamicAccess
- .createInstanceFor[FailureDetector](fqcn,
- List(classOf[Config] -> config, classOf[EventStream] -> system.eventStream))
+ .createInstanceFor[FailureDetector](
+ fqcn,
+ List(classOf[Config] -> config, classOf[EventStream] -> system.eventStream))
.recover({
case e =>
throw new ConfigurationException(s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e)
diff --git a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala
index 8518f0334c..1150160b30 100644
--- a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala
@@ -29,9 +29,10 @@ private[akka] object MessageSerializer {
*/
def deserialize(system: ExtendedActorSystem, messageProtocol: SerializedMessage): AnyRef = {
SerializationExtension(system)
- .deserialize(messageProtocol.getMessage.toByteArray,
- messageProtocol.getSerializerId,
- if (messageProtocol.hasMessageManifest) messageProtocol.getMessageManifest.toStringUtf8 else "")
+ .deserialize(
+ messageProtocol.getMessage.toByteArray,
+ messageProtocol.getSerializerId,
+ if (messageProtocol.hasMessageManifest) messageProtocol.getMessageManifest.toStringUtf8 else "")
.get
}
@@ -60,16 +61,18 @@ private[akka] object MessageSerializer {
builder.build
} catch {
case NonFatal(e) =>
- throw new SerializationException(s"Failed to serialize remote message [${message.getClass}] " +
- s"using serializer [${serializer.getClass}].",
- e)
+ throw new SerializationException(
+ s"Failed to serialize remote message [${message.getClass}] " +
+ s"using serializer [${serializer.getClass}].",
+ e)
} finally Serialization.currentTransportInformation.value = oldInfo
}
- def serializeForArtery(serialization: Serialization,
- outboundEnvelope: OutboundEnvelope,
- headerBuilder: HeaderBuilder,
- envelope: EnvelopeBuffer): Unit = {
+ def serializeForArtery(
+ serialization: Serialization,
+ outboundEnvelope: OutboundEnvelope,
+ headerBuilder: HeaderBuilder,
+ envelope: EnvelopeBuffer): Unit = {
val message = outboundEnvelope.message
val serializer = serialization.findSerializerFor(message)
val oldInfo = Serialization.currentTransportInformation.value
@@ -89,12 +92,13 @@ private[akka] object MessageSerializer {
} finally Serialization.currentTransportInformation.value = oldInfo
}
- def deserializeForArtery(@unused system: ExtendedActorSystem,
- @unused originUid: Long,
- serialization: Serialization,
- serializer: Int,
- classManifest: String,
- envelope: EnvelopeBuffer): AnyRef = {
+ def deserializeForArtery(
+ @unused system: ExtendedActorSystem,
+ @unused originUid: Long,
+ serialization: Serialization,
+ serializer: Int,
+ classManifest: String,
+ envelope: EnvelopeBuffer): AnyRef = {
serialization.deserializeByteBuffer(envelope.byteBuffer, serializer, classManifest)
}
}
diff --git a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
index ebf16e04ae..a8f5c9b722 100644
--- a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
+++ b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
@@ -50,23 +50,26 @@ import akka.util.Helpers.ConfigOps
* @param clock The clock, returning current time in milliseconds, but can be faked for testing
* purposes. It is only used for measuring intervals (duration).
*/
-class PhiAccrualFailureDetector(val threshold: Double,
- val maxSampleSize: Int,
- val minStdDeviation: FiniteDuration,
- val acceptableHeartbeatPause: FiniteDuration,
- val firstHeartbeatEstimate: FiniteDuration,
- eventStream: Option[EventStream])(implicit
- clock: Clock)
+class PhiAccrualFailureDetector(
+ val threshold: Double,
+ val maxSampleSize: Int,
+ val minStdDeviation: FiniteDuration,
+ val acceptableHeartbeatPause: FiniteDuration,
+ val firstHeartbeatEstimate: FiniteDuration,
+ eventStream: Option[EventStream])(
+ implicit
+ clock: Clock)
extends FailureDetector {
/**
* Constructor without eventStream to support backwards compatibility
*/
- def this(threshold: Double,
- maxSampleSize: Int,
- minStdDeviation: FiniteDuration,
- acceptableHeartbeatPause: FiniteDuration,
- firstHeartbeatEstimate: FiniteDuration)(implicit clock: Clock) =
+ def this(
+ threshold: Double,
+ maxSampleSize: Int,
+ minStdDeviation: FiniteDuration,
+ acceptableHeartbeatPause: FiniteDuration,
+ firstHeartbeatEstimate: FiniteDuration)(implicit clock: Clock) =
this(threshold, maxSampleSize, minStdDeviation, acceptableHeartbeatPause, firstHeartbeatEstimate, None)(clock)
/**
@@ -76,12 +79,13 @@ class PhiAccrualFailureDetector(val threshold: Double,
* `heartbeat-interval`.
*/
def this(config: Config, ev: EventStream) =
- this(threshold = config.getDouble("threshold"),
- maxSampleSize = config.getInt("max-sample-size"),
- minStdDeviation = config.getMillisDuration("min-std-deviation"),
- acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"),
- firstHeartbeatEstimate = config.getMillisDuration("heartbeat-interval"),
- Some(ev))
+ this(
+ threshold = config.getDouble("threshold"),
+ maxSampleSize = config.getInt("max-sample-size"),
+ minStdDeviation = config.getMillisDuration("min-std-deviation"),
+ acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"),
+ firstHeartbeatEstimate = config.getMillisDuration("heartbeat-interval"),
+ Some(ev))
require(threshold > 0.0, "failure-detector.threshold must be > 0")
require(maxSampleSize > 0, "failure-detector.max-sample-size must be > 0")
@@ -135,9 +139,10 @@ class PhiAccrualFailureDetector(val threshold: Double,
if (isAvailable(timestamp)) {
if (interval >= (acceptableHeartbeatPauseMillis / 3 * 2) && eventStream.isDefined)
eventStream.get.publish(
- Warning(this.toString,
- getClass,
- s"heartbeat interval is growing too large for address $address: $interval millis"))
+ Warning(
+ this.toString,
+ getClass,
+ s"heartbeat interval is growing too large for address $address: $interval millis"))
oldState.history :+ interval
} else oldState.history
}
@@ -205,10 +210,11 @@ private[akka] object HeartbeatHistory {
* for empty HeartbeatHistory, i.e. throws ArithmeticException.
*/
def apply(maxSampleSize: Int): HeartbeatHistory =
- HeartbeatHistory(maxSampleSize = maxSampleSize,
- intervals = immutable.IndexedSeq.empty,
- intervalSum = 0L,
- squaredIntervalSum = 0L)
+ HeartbeatHistory(
+ maxSampleSize = maxSampleSize,
+ intervals = immutable.IndexedSeq.empty,
+ intervalSum = 0L,
+ squaredIntervalSum = 0L)
}
@@ -219,10 +225,11 @@ private[akka] object HeartbeatHistory {
* The stats (mean, variance, stdDeviation) are not defined for
* for empty HeartbeatHistory, i.e. throws ArithmeticException.
*/
-private[akka] final case class HeartbeatHistory private (maxSampleSize: Int,
- intervals: immutable.IndexedSeq[Long],
- intervalSum: Long,
- squaredIntervalSum: Long) {
+private[akka] final case class HeartbeatHistory private (
+ maxSampleSize: Int,
+ intervals: immutable.IndexedSeq[Long],
+ intervalSum: Long,
+ squaredIntervalSum: Long) {
// Heartbeat histories are created trough the firstHeartbeat variable of the PhiAccrualFailureDetector
// which always have intervals.size > 0.
@@ -242,19 +249,21 @@ private[akka] final case class HeartbeatHistory private (maxSampleSize: Int,
@tailrec
final def :+(interval: Long): HeartbeatHistory = {
if (intervals.size < maxSampleSize)
- HeartbeatHistory(maxSampleSize,
- intervals = intervals :+ interval,
- intervalSum = intervalSum + interval,
- squaredIntervalSum = squaredIntervalSum + pow2(interval))
+ HeartbeatHistory(
+ maxSampleSize,
+ intervals = intervals :+ interval,
+ intervalSum = intervalSum + interval,
+ squaredIntervalSum = squaredIntervalSum + pow2(interval))
else
dropOldest :+ interval // recur
}
private def dropOldest: HeartbeatHistory =
- HeartbeatHistory(maxSampleSize,
- intervals = intervals.drop(1),
- intervalSum = intervalSum - intervals.head,
- squaredIntervalSum = squaredIntervalSum - pow2(intervals.head))
+ HeartbeatHistory(
+ maxSampleSize,
+ intervals = intervals.drop(1),
+ intervalSum = intervalSum - intervals.head,
+ squaredIntervalSum = squaredIntervalSum - pow2(intervals.head))
private def pow2(x: Long) = x * x
}
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
index e0338a1277..29e8924c64 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
@@ -108,14 +108,16 @@ private[akka] object RemoteActorRefProvider {
if (seqOpt.isEmpty) super.!(DeadLetter(m, senderOption.getOrElse(_provider.deadLetters), recipient))
case env: OutboundEnvelope =>
super.!(
- DeadLetter(unwrapSystemMessageEnvelope(env.message),
- env.sender.getOrElse(_provider.deadLetters),
- env.recipient.getOrElse(_provider.deadLetters)))
+ DeadLetter(
+ unwrapSystemMessageEnvelope(env.message),
+ env.sender.getOrElse(_provider.deadLetters),
+ env.recipient.getOrElse(_provider.deadLetters)))
case DeadLetter(env: OutboundEnvelope, _, _) =>
super.!(
- DeadLetter(unwrapSystemMessageEnvelope(env.message),
- env.sender.getOrElse(_provider.deadLetters),
- env.recipient.getOrElse(_provider.deadLetters)))
+ DeadLetter(
+ unwrapSystemMessageEnvelope(env.message),
+ env.sender.getOrElse(_provider.deadLetters),
+ env.recipient.getOrElse(_provider.deadLetters)))
case _ => super.!(message)(sender)
}
@@ -136,10 +138,11 @@ private[akka] object RemoteActorRefProvider {
* Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it.
*
*/
-private[akka] class RemoteActorRefProvider(val systemName: String,
- val settings: ActorSystem.Settings,
- val eventStream: EventStream,
- val dynamicAccess: DynamicAccess)
+private[akka] class RemoteActorRefProvider(
+ val systemName: String,
+ val settings: ActorSystem.Settings,
+ val eventStream: EventStream,
+ val dynamicAccess: DynamicAccess)
extends ActorRefProvider {
import RemoteActorRefProvider._
@@ -205,12 +208,13 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
val internals = Internals(
remoteDaemon = {
- val d = new RemoteSystemDaemon(system,
- local.rootPath / "remote",
- rootGuardian,
- remotingTerminator,
- _log,
- untrustedMode = remoteSettings.UntrustedMode)
+ val d = new RemoteSystemDaemon(
+ system,
+ local.rootPath / "remote",
+ rootGuardian,
+ remotingTerminator,
+ _log,
+ untrustedMode = remoteSettings.UntrustedMode)
local.registerExtraNames(Map(("remote", d)))
d
},
@@ -237,34 +241,38 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
val failureDetector = createRemoteWatcherFailureDetector(system)
system.systemActorOf(
configureDispatcher(
- RemoteWatcher.props(failureDetector,
- heartbeatInterval = WatchHeartBeatInterval,
- unreachableReaperInterval = WatchUnreachableReaperInterval,
- heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter)),
+ RemoteWatcher.props(
+ failureDetector,
+ heartbeatInterval = WatchHeartBeatInterval,
+ unreachableReaperInterval = WatchUnreachableReaperInterval,
+ heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter)),
"remote-watcher")
}
protected def createRemoteWatcherFailureDetector(system: ExtendedActorSystem): FailureDetectorRegistry[Address] = {
def createFailureDetector(): FailureDetector =
- FailureDetectorLoader.load(remoteSettings.WatchFailureDetectorImplementationClass,
- remoteSettings.WatchFailureDetectorConfig,
- system)
+ FailureDetectorLoader.load(
+ remoteSettings.WatchFailureDetectorImplementationClass,
+ remoteSettings.WatchFailureDetectorConfig,
+ system)
new DefaultFailureDetectorRegistry(() => createFailureDetector())
}
protected def createRemoteDeploymentWatcher(system: ActorSystemImpl): ActorRef =
- system.systemActorOf(remoteSettings.configureDispatcher(Props[RemoteDeploymentWatcher]()),
- "remote-deployment-watcher")
+ system.systemActorOf(
+ remoteSettings.configureDispatcher(Props[RemoteDeploymentWatcher]()),
+ "remote-deployment-watcher")
- def actorOf(system: ActorSystemImpl,
- props: Props,
- supervisor: InternalActorRef,
- path: ActorPath,
- systemService: Boolean,
- deploy: Option[Deploy],
- lookupDeploy: Boolean,
- async: Boolean): InternalActorRef =
+ def actorOf(
+ system: ActorSystemImpl,
+ props: Props,
+ supervisor: InternalActorRef,
+ path: ActorPath,
+ systemService: Boolean,
+ deploy: Option[Deploy],
+ lookupDeploy: Boolean,
+ async: Boolean): InternalActorRef =
if (systemService) local.actorOf(system, props, supervisor, path, systemService, deploy, lookupDeploy, async)
else {
@@ -360,12 +368,13 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
if (hasAddress(path.address)) actorFor(rootGuardian, path.elements)
else
try {
- new RemoteActorRef(transport,
- transport.localAddressForRemote(path.address),
- path,
- Nobody,
- props = None,
- deploy = None)
+ new RemoteActorRef(
+ transport,
+ transport.localAddressForRemote(path.address),
+ path,
+ Nobody,
+ props = None,
+ deploy = None)
} catch {
case NonFatal(e) =>
log.error(e, "Error while looking up address [{}]", path.address)
@@ -380,12 +389,13 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
else {
val rootPath = RootActorPath(address) / elems
try {
- new RemoteActorRef(transport,
- transport.localAddressForRemote(address),
- rootPath,
- Nobody,
- props = None,
- deploy = None)
+ new RemoteActorRef(
+ transport,
+ transport.localAddressForRemote(address),
+ rootPath,
+ Nobody,
+ props = None,
+ deploy = None)
} catch {
case NonFatal(e) =>
log.error(e, "Error while looking up address [{}]", rootPath.address)
@@ -403,12 +413,13 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
if (hasAddress(address)) rootGuardian
else
try {
- new RemoteActorRef(transport,
- transport.localAddressForRemote(address),
- RootActorPath(address),
- Nobody,
- props = None,
- deploy = None)
+ new RemoteActorRef(
+ transport,
+ transport.localAddressForRemote(address),
+ RootActorPath(address),
+ Nobody,
+ props = None,
+ deploy = None)
} catch {
case NonFatal(e) =>
log.error(e, "No root guardian at [{}]", address)
@@ -427,12 +438,13 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
local.resolveActorRef(rootGuardian, elems)
else
try {
- new RemoteActorRef(transport,
- localAddress,
- RootActorPath(address) / elems,
- Nobody,
- props = None,
- deploy = None)
+ new RemoteActorRef(
+ transport,
+ localAddress,
+ RootActorPath(address) / elems,
+ Nobody,
+ props = None,
+ deploy = None)
} catch {
case NonFatal(e) =>
log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage)
@@ -463,12 +475,13 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
else {
val rootPath = RootActorPath(address) / elems
try {
- new RemoteActorRef(transport,
- transport.localAddressForRemote(address),
- rootPath,
- Nobody,
- props = None,
- deploy = None)
+ new RemoteActorRef(
+ transport,
+ transport.localAddressForRemote(address),
+ rootPath,
+ Nobody,
+ props = None,
+ deploy = None)
} catch {
case NonFatal(e) =>
log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage)
@@ -484,12 +497,13 @@ private[akka] class RemoteActorRefProvider(val systemName: String,
if (hasAddress(path.address)) local.resolveActorRef(rootGuardian, path.elements)
else
try {
- new RemoteActorRef(transport,
- transport.localAddressForRemote(path.address),
- path,
- Nobody,
- props = None,
- deploy = None)
+ new RemoteActorRef(
+ transport,
+ transport.localAddressForRemote(path.address),
+ path,
+ Nobody,
+ props = None,
+ deploy = None)
} catch {
case NonFatal(e) =>
log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage)
@@ -563,12 +577,13 @@ private[akka] trait RemoteRef extends ActorRefScope {
* Remote ActorRef that is used when referencing the Actor on a different node than its "home" node.
* This reference is network-aware (remembers its origin) and immutable.
*/
-private[akka] class RemoteActorRef private[akka] (remote: RemoteTransport,
- val localAddressToUse: Address,
- val path: ActorPath,
- val getParent: InternalActorRef,
- props: Option[Props],
- deploy: Option[Deploy])
+private[akka] class RemoteActorRef private[akka] (
+ remote: RemoteTransport,
+ val localAddressToUse: Address,
+ val path: ActorPath,
+ val getParent: InternalActorRef,
+ props: Option[Props],
+ deploy: Option[Deploy])
extends InternalActorRef
with RemoteRef {
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
index a0d02eddc9..962e22eb5f 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
@@ -55,12 +55,13 @@ private[akka] final case class DaemonMsgCreate(props: Props, deploy: Deploy, pat
*
* It acts as the brain of the remote that responds to system remote events (messages) and undertakes action.
*/
-private[akka] class RemoteSystemDaemon(system: ActorSystemImpl,
- _path: ActorPath,
- _parent: InternalActorRef,
- terminator: ActorRef,
- _log: MarkerLoggingAdapter,
- val untrustedMode: Boolean)
+private[akka] class RemoteSystemDaemon(
+ system: ActorSystemImpl,
+ _path: ActorPath,
+ _parent: InternalActorRef,
+ terminator: ActorRef,
+ _log: MarkerLoggingAdapter,
+ val untrustedMode: Boolean)
extends VirtualPathContainer(system.provider, _path, _parent, _log) {
import akka.actor.SystemGuardian._
@@ -169,12 +170,13 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl,
else {
val ex =
new NotWhitelistedClassRemoteDeploymentAttemptException(props.actorClass, remoteDeploymentWhitelist)
- log.error(LogMarker.Security,
- ex,
- "Received command to create remote Actor, but class [{}] is not white-listed! " +
- "Target path: [{}]",
- props.actorClass,
- path)
+ log.error(
+ LogMarker.Security,
+ ex,
+ "Received command to create remote Actor, but class [{}] is not white-listed! " +
+ "Target path: [{}]",
+ props.actorClass,
+ path)
}
case DaemonMsgCreate(props, deploy, path, supervisor) =>
doCreateActor(message, props, deploy, path, supervisor)
@@ -242,14 +244,15 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl,
}
val isTerminating = !terminating.whileOff {
val parent = supervisor.asInstanceOf[InternalActorRef]
- val actor = system.provider.actorOf(system,
- props,
- parent,
- p,
- systemService = false,
- Some(deploy),
- lookupDeploy = true,
- async = false)
+ val actor = system.provider.actorOf(
+ system,
+ props,
+ parent,
+ p,
+ systemService = false,
+ Some(deploy),
+ lookupDeploy = true,
+ async = false)
addChild(childName, actor)
actor.sendSystemMessage(Watch(actor, this))
actor.start()
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
index 02d22d4f28..6a37a7a90a 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
@@ -129,9 +129,10 @@ final class RemoteSettings(val config: Config) {
val Transports: immutable.Seq[(String, immutable.Seq[String], Config)] = transportNames.map { name =>
val transportConfig = transportConfigFor(name)
- (transportConfig.getString("transport-class"),
- immutableSeq(transportConfig.getStringList("applied-adapters")).reverse,
- transportConfig)
+ (
+ transportConfig.getString("transport-class"),
+ immutableSeq(transportConfig.getStringList("applied-adapters")).reverse,
+ transportConfig)
}
val Adapters: Map[String, String] = configToMap(getConfig("akka.remote.adapters"))
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
index 5c49bbeb0c..dce73d24e2 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
@@ -22,15 +22,17 @@ private[akka] object RemoteWatcher {
/**
* Factory method for `RemoteWatcher` [[akka.actor.Props]].
*/
- def props(failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
- heartbeatExpectedResponseAfter: FiniteDuration): Props =
- Props(classOf[RemoteWatcher],
- failureDetector,
- heartbeatInterval,
- unreachableReaperInterval,
- heartbeatExpectedResponseAfter).withDeploy(Deploy.local)
+ def props(
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
+ heartbeatExpectedResponseAfter: FiniteDuration): Props =
+ Props(
+ classOf[RemoteWatcher],
+ failureDetector,
+ heartbeatInterval,
+ unreachableReaperInterval,
+ heartbeatExpectedResponseAfter).withDeploy(Deploy.local)
final case class WatchRemote(watchee: InternalActorRef, watcher: InternalActorRef)
final case class UnwatchRemote(watchee: InternalActorRef, watcher: InternalActorRef)
@@ -52,8 +54,9 @@ private[akka] object RemoteWatcher {
lazy val empty: Stats = counts(0, 0)
def counts(watching: Int, watchingNodes: Int): Stats = Stats(watching, watchingNodes)(Set.empty, Set.empty)
}
- final case class Stats(watching: Int, watchingNodes: Int)(val watchingRefs: Set[(ActorRef, ActorRef)],
- val watchingAddresses: Set[Address]) {
+ final case class Stats(watching: Int, watchingNodes: Int)(
+ val watchingRefs: Set[(ActorRef, ActorRef)],
+ val watchingAddresses: Set[Address]) {
override def toString: String = {
def formatWatchingRefs: String =
watchingRefs.map(x => x._2.path.name + " -> " + x._1.path.name).mkString("[", ", ", "]")
@@ -85,10 +88,11 @@ private[akka] object RemoteWatcher {
* both directions, but independent of each other.
*
*/
-private[akka] class RemoteWatcher(failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
- heartbeatExpectedResponseAfter: FiniteDuration)
+private[akka] class RemoteWatcher(
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
+ heartbeatExpectedResponseAfter: FiniteDuration)
extends Actor
with ActorLogging
with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
diff --git a/akka-remote/src/main/scala/akka/remote/Remoting.scala b/akka-remote/src/main/scala/akka/remote/Remoting.scala
index 44ebfd7369..5c806076ae 100644
--- a/akka-remote/src/main/scala/akka/remote/Remoting.scala
+++ b/akka-remote/src/main/scala/akka/remote/Remoting.scala
@@ -76,8 +76,9 @@ private[remote] object Remoting {
final val EndpointManagerName = "endpointManager"
- def localAddressForRemote(transportMapping: Map[String, Set[(AkkaProtocolTransport, Address)]],
- remote: Address): Address = {
+ def localAddressForRemote(
+ transportMapping: Map[String, Set[(AkkaProtocolTransport, Address)]],
+ remote: Address): Address = {
transportMapping.get(remote.protocol) match {
case Some(transports) =>
@@ -239,8 +240,9 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc
case Some(manager) =>
manager.tell(Send(message, senderOption, recipient), sender = senderOption.getOrElse(Actor.noSender))
case None =>
- throw new RemoteTransportExceptionNoStackTrace("Attempted to send remote message but Remoting is not running.",
- null)
+ throw new RemoteTransportExceptionNoStackTrace(
+ "Attempted to send remote message but Remoting is not running.",
+ null)
}
override def managementCommand(cmd: Any): Future[Boolean] = endpointManager match {
@@ -284,10 +286,11 @@ private[remote] object EndpointManager {
final case class Listen(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]]) extends RemotingCommand
case object StartupFinished extends RemotingCommand
case object ShutdownAndFlush extends RemotingCommand
- final case class Send(message: Any,
- senderOption: OptionVal[ActorRef],
- recipient: RemoteActorRef,
- seqOpt: Option[SeqNo] = None)
+ final case class Send(
+ message: Any,
+ senderOption: OptionVal[ActorRef],
+ recipient: RemoteActorRef,
+ seqOpt: Option[SeqNo] = None)
extends RemotingCommand
with HasSequenceNumber {
override def toString = s"Remote message $senderOption -> $recipient"
@@ -302,8 +305,9 @@ private[remote] object EndpointManager {
// Messages internal to EndpointManager
case object Prune extends NoSerializationVerificationNeeded
- final case class ListensResult(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]],
- results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])])
+ final case class ListensResult(
+ addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]],
+ results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])])
extends NoSerializationVerificationNeeded
final case class ListensFailure(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]], cause: Throwable)
extends NoSerializationVerificationNeeded
@@ -501,8 +505,9 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
def keepQuarantinedOr(remoteAddress: Address)(body: => Unit): Unit = endpoints.refuseUid(remoteAddress) match {
case Some(uid) =>
- log.info("Quarantined address [{}] is still unreachable or has not been restarted. Keeping it quarantined.",
- remoteAddress)
+ log.info(
+ "Quarantined address [{}] is still unreachable or has not been restarted. Keeping it quarantined.",
+ remoteAddress)
// Restoring Quarantine marker overwritten by a Pass(endpoint, refuseUid) pair while probing remote system.
endpoints.markAsQuarantined(remoteAddress, uid, Deadline.now + settings.QuarantineDuration)
case None => body
@@ -511,10 +516,11 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
override val supervisorStrategy = {
def hopeless(e: HopelessAssociation): SupervisorStrategy.Directive = e match {
case HopelessAssociation(_, remoteAddress, Some(uid), reason) =>
- log.error(reason,
- "Association to [{}] with UID [{}] irrecoverably failed. Quarantining address.",
- remoteAddress,
- uid)
+ log.error(
+ reason,
+ "Association to [{}] with UID [{}] irrecoverably failed. Quarantining address.",
+ remoteAddress,
+ uid)
settings.QuarantineDuration match {
case d: FiniteDuration =>
endpoints.markAsQuarantined(remoteAddress, uid, Deadline.now + d)
@@ -525,10 +531,11 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
case HopelessAssociation(_, remoteAddress, None, _) =>
keepQuarantinedOr(remoteAddress) {
- log.warning("Association to [{}] with unknown UID is irrecoverably failed. " +
- "Address cannot be quarantined without knowing the UID, gating instead for {} ms.",
- remoteAddress,
- settings.RetryGateClosedFor.toMillis)
+ log.warning(
+ "Association to [{}] with unknown UID is irrecoverably failed. " +
+ "Address cannot be quarantined without knowing the UID, gating instead for {} ms.",
+ remoteAddress,
+ settings.RetryGateClosedFor.toMillis)
endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor)
}
Stop
@@ -557,10 +564,11 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
case ShutDownAssociation(_, remoteAddress, _) =>
keepQuarantinedOr(remoteAddress) {
- log.debug("Remote system with address [{}] has shut down. " +
- "Address is now gated for {} ms, all messages to this address will be delivered to dead letters.",
- remoteAddress,
- settings.RetryGateClosedFor.toMillis)
+ log.debug(
+ "Remote system with address [{}] has shut down. " +
+ "Address is now gated for {} ms, all messages to this address will be delivered to dead letters.",
+ remoteAddress,
+ settings.RetryGateClosedFor.toMillis)
endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor)
}
Stop
@@ -599,8 +607,9 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
}
.map {
case (a, t) if t.size > 1 =>
- throw new RemoteTransportException(s"There are more than one transports listening on local address [$a]",
- null)
+ throw new RemoteTransportException(
+ s"There are more than one transports listening on local address [$a]",
+ null)
case (a, t) => a -> t.head._1
}
// Register to each transport as listener and collect mapping to addresses
@@ -634,10 +643,11 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
(endpoints.writableEndpointWithPolicyFor(address), uidToQuarantineOption) match {
case (Some(Pass(endpoint, _)), None) =>
context.stop(endpoint)
- log.warning("Association to [{}] with unknown UID is reported as quarantined, but " +
- "address cannot be quarantined without knowing the UID, gating instead for {} ms.",
- address,
- settings.RetryGateClosedFor.toMillis)
+ log.warning(
+ "Association to [{}] with unknown UID is reported as quarantined, but " +
+ "address cannot be quarantined without knowing the UID, gating instead for {} ms.",
+ address,
+ settings.RetryGateClosedFor.toMillis)
endpoints.markAsFailed(endpoint, Deadline.now + settings.RetryGateClosedFor)
case (Some(Pass(endpoint, uidOption)), Some(quarantineUid)) =>
uidOption match {
@@ -648,9 +658,10 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
// or it does not match with the UID to be quarantined
case None if !endpoints.refuseUid(address).contains(quarantineUid) =>
// the quarantine uid may be got fresh by cluster gossip, so update refuseUid for late handle when the writer got uid
- endpoints.registerWritableEndpointRefuseUid(address,
- quarantineUid,
- Deadline.now + settings.QuarantineDuration)
+ endpoints.registerWritableEndpointRefuseUid(
+ address,
+ quarantineUid,
+ Deadline.now + settings.QuarantineDuration)
case _ => //the quarantine uid has lost the race with some failure, do nothing
}
case (Some(Quarantined(uid, _)), Some(quarantineUid)) if uid == quarantineUid => // the UID to be quarantined already exists, do nothing
@@ -701,14 +712,16 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
val recipientAddress = recipientRef.path.address
def createAndRegisterWritingEndpoint(): ActorRef = {
- endpoints.registerWritableEndpoint(recipientAddress,
- uid = None,
- createEndpoint(recipientAddress,
- recipientRef.localAddressToUse,
- transportMapping(recipientRef.localAddressToUse),
- settings,
- handleOption = None,
- writing = true))
+ endpoints.registerWritableEndpoint(
+ recipientAddress,
+ uid = None,
+ createEndpoint(
+ recipientAddress,
+ recipientRef.localAddressToUse,
+ transportMapping(recipientRef.localAddressToUse),
+ settings,
+ handleOption = None,
+ writing = true))
}
endpoints.writableEndpointWithPolicyFor(recipientAddress) match {
@@ -835,12 +848,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
val writing = settings.UsePassiveConnections && !endpoints.hasWritableEndpointFor(handle.remoteAddress)
eventPublisher.notifyListeners(AssociatedEvent(handle.localAddress, handle.remoteAddress, inbound = true))
- val endpoint = createEndpoint(handle.remoteAddress,
- handle.localAddress,
- transportMapping(handle.localAddress),
- settings,
- Some(handle),
- writing)
+ val endpoint = createEndpoint(
+ handle.remoteAddress,
+ handle.localAddress,
+ transportMapping(handle.localAddress),
+ settings,
+ Some(handle),
+ writing)
if (writing)
endpoints.registerWritableEndpoint(handle.remoteAddress, Some(handle.handshakeInfo.uid), endpoint)
@@ -908,12 +922,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
pendingReadHandoffs -= takingOverFrom
eventPublisher.notifyListeners(AssociatedEvent(handle.localAddress, handle.remoteAddress, inbound = true))
- val endpoint = createEndpoint(handle.remoteAddress,
- handle.localAddress,
- transportMapping(handle.localAddress),
- settings,
- Some(handle),
- writing = false)
+ val endpoint = createEndpoint(
+ handle.remoteAddress,
+ handle.localAddress,
+ transportMapping(handle.localAddress),
+ settings,
+ Some(handle),
+ writing = false)
endpoints.registerReadOnlyEndpoint(handle.remoteAddress, endpoint, handle.handshakeInfo.uid)
}
}
@@ -923,12 +938,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
pendingReadHandoffs -= takingOverFrom
}
- private def createEndpoint(remoteAddress: Address,
- localAddress: Address,
- transport: AkkaProtocolTransport,
- endpointSettings: RemoteSettings,
- handleOption: Option[AkkaProtocolHandle],
- writing: Boolean): ActorRef = {
+ private def createEndpoint(
+ remoteAddress: Address,
+ localAddress: Address,
+ transport: AkkaProtocolTransport,
+ endpointSettings: RemoteSettings,
+ handleOption: Option[AkkaProtocolHandle],
+ writing: Boolean): ActorRef = {
require(transportMapping contains localAddress, "Transport mapping is not defined for the address")
// refuseUid is ignored for read-only endpoints since the UID of the remote system is already known and has passed
// quarantine checks
@@ -939,14 +955,15 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
context.actorOf(
RARP(extendedSystem)
.configureDispatcher(
- ReliableDeliverySupervisor.props(handleOption,
- localAddress,
- remoteAddress,
- refuseUid,
- transport,
- endpointSettings,
- AkkaPduProtobufCodec,
- receiveBuffers))
+ ReliableDeliverySupervisor.props(
+ handleOption,
+ localAddress,
+ remoteAddress,
+ refuseUid,
+ transport,
+ endpointSettings,
+ AkkaPduProtobufCodec,
+ receiveBuffers))
.withDeploy(Deploy.local),
"reliableEndpointWriter-" + AddressUrlEncoder(remoteAddress) + "-" + endpointId.next()))
else
@@ -954,15 +971,16 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter)
context.actorOf(
RARP(extendedSystem)
.configureDispatcher(
- EndpointWriter.props(handleOption,
- localAddress,
- remoteAddress,
- refuseUid,
- transport,
- endpointSettings,
- AkkaPduProtobufCodec,
- receiveBuffers,
- reliableDeliverySupervisor = None))
+ EndpointWriter.props(
+ handleOption,
+ localAddress,
+ remoteAddress,
+ refuseUid,
+ transport,
+ endpointSettings,
+ AkkaPduProtobufCodec,
+ receiveBuffers,
+ reliableDeliverySupervisor = None))
.withDeploy(Deploy.local),
"endpointWriter-" + AddressUrlEncoder(remoteAddress) + "-" + endpointId.next()))
}
diff --git a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
index c3d03def29..d792b2b260 100644
--- a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
@@ -44,11 +44,12 @@ final case class DisassociatedEvent(localAddress: Address, remoteAddress: Addres
}
@SerialVersionUID(1L)
-final case class AssociationErrorEvent(cause: Throwable,
- localAddress: Address,
- remoteAddress: Address,
- inbound: Boolean,
- logLevel: Logging.LogLevel)
+final case class AssociationErrorEvent(
+ cause: Throwable,
+ localAddress: Address,
+ remoteAddress: Address,
+ inbound: Boolean,
+ logLevel: Logging.LogLevel)
extends AssociationEvent {
protected override def eventName: String = "AssociationError"
override def toString: String = s"${super.toString}: Error [${cause.getMessage}] [${Logging.stackTraceFor(cause)}]"
diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala
index 38492d7dc8..5420fccbf0 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala
@@ -155,8 +155,9 @@ private[akka] final class ArterySettings private (config: Config) {
.requiring(interval => interval > Duration.Zero, "stop-idle-outbound-after must be more than zero")
val QuarantineIdleOutboundAfter: FiniteDuration = config
.getMillisDuration("quarantine-idle-outbound-after")
- .requiring(interval => interval > StopIdleOutboundAfter,
- "quarantine-idle-outbound-after must be greater than stop-idle-outbound-after")
+ .requiring(
+ interval => interval > StopIdleOutboundAfter,
+ "quarantine-idle-outbound-after must be greater than stop-idle-outbound-after")
val StopQuarantinedAfterIdle: FiniteDuration =
config
.getMillisDuration("stop-quarantined-after-idle")
diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala
index 674e136a13..8fe67a2a1b 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala
@@ -98,11 +98,12 @@ private[remote] trait InboundContext {
*/
private[remote] object AssociationState {
def apply(): AssociationState =
- new AssociationState(incarnation = 1,
- uniqueRemoteAddressPromise = Promise(),
- lastUsedTimestamp = new AtomicLong(System.nanoTime()),
- controlIdleKillSwitch = OptionVal.None,
- quarantined = ImmutableLongMap.empty[QuarantinedTimestamp])
+ new AssociationState(
+ incarnation = 1,
+ uniqueRemoteAddressPromise = Promise(),
+ lastUsedTimestamp = new AtomicLong(System.nanoTime()),
+ controlIdleKillSwitch = OptionVal.None,
+ quarantined = ImmutableLongMap.empty[QuarantinedTimestamp])
final case class QuarantinedTimestamp(nanoTime: Long) {
override def toString: String =
@@ -113,11 +114,12 @@ private[remote] object AssociationState {
/**
* INTERNAL API
*/
-private[remote] final class AssociationState(val incarnation: Int,
- val uniqueRemoteAddressPromise: Promise[UniqueAddress],
- val lastUsedTimestamp: AtomicLong, // System.nanoTime timestamp
- val controlIdleKillSwitch: OptionVal[SharedKillSwitch],
- val quarantined: ImmutableLongMap[AssociationState.QuarantinedTimestamp]) {
+private[remote] final class AssociationState(
+ val incarnation: Int,
+ val uniqueRemoteAddressPromise: Promise[UniqueAddress],
+ val lastUsedTimestamp: AtomicLong, // System.nanoTime timestamp
+ val controlIdleKillSwitch: OptionVal[SharedKillSwitch],
+ val quarantined: ImmutableLongMap[AssociationState.QuarantinedTimestamp]) {
import AssociationState.QuarantinedTimestamp
@@ -144,20 +146,22 @@ private[remote] final class AssociationState(val incarnation: Int,
}
def newIncarnation(remoteAddressPromise: Promise[UniqueAddress]): AssociationState =
- new AssociationState(incarnation + 1,
- remoteAddressPromise,
- lastUsedTimestamp = new AtomicLong(System.nanoTime()),
- controlIdleKillSwitch,
- quarantined)
+ new AssociationState(
+ incarnation + 1,
+ remoteAddressPromise,
+ lastUsedTimestamp = new AtomicLong(System.nanoTime()),
+ controlIdleKillSwitch,
+ quarantined)
def newQuarantined(): AssociationState =
uniqueRemoteAddressPromise.future.value match {
case Some(Success(a)) =>
- new AssociationState(incarnation,
- uniqueRemoteAddressPromise,
- lastUsedTimestamp = new AtomicLong(System.nanoTime()),
- controlIdleKillSwitch,
- quarantined = quarantined.updated(a.uid, QuarantinedTimestamp(System.nanoTime())))
+ new AssociationState(
+ incarnation,
+ uniqueRemoteAddressPromise,
+ lastUsedTimestamp = new AtomicLong(System.nanoTime()),
+ controlIdleKillSwitch,
+ quarantined = quarantined.updated(a.uid, QuarantinedTimestamp(System.nanoTime())))
case _ => this
}
@@ -171,11 +175,12 @@ private[remote] final class AssociationState(val incarnation: Int,
def isQuarantined(uid: Long): Boolean = quarantined.contains(uid)
def withControlIdleKillSwitch(killSwitch: OptionVal[SharedKillSwitch]): AssociationState =
- new AssociationState(incarnation,
- uniqueRemoteAddressPromise,
- lastUsedTimestamp,
- controlIdleKillSwitch = killSwitch,
- quarantined)
+ new AssociationState(
+ incarnation,
+ uniqueRemoteAddressPromise,
+ lastUsedTimestamp,
+ controlIdleKillSwitch = killSwitch,
+ quarantined)
override def toString(): String = {
val a = uniqueRemoteAddressPromise.future.value match {
@@ -234,10 +239,11 @@ private[remote] trait OutboundContext {
* INTERNAL API
*/
private[remote] object FlushOnShutdown {
- def props(done: Promise[Done],
- timeout: FiniteDuration,
- inboundContext: InboundContext,
- associations: Set[Association]): Props = {
+ def props(
+ done: Promise[Done],
+ timeout: FiniteDuration,
+ inboundContext: InboundContext,
+ associations: Set[Association]): Props = {
require(associations.nonEmpty)
Props(new FlushOnShutdown(done, timeout, inboundContext, associations))
}
@@ -248,10 +254,11 @@ private[remote] object FlushOnShutdown {
/**
* INTERNAL API
*/
-private[remote] class FlushOnShutdown(done: Promise[Done],
- timeout: FiniteDuration,
- @unused inboundContext: InboundContext,
- associations: Set[Association])
+private[remote] class FlushOnShutdown(
+ done: Promise[Done],
+ timeout: FiniteDuration,
+ @unused inboundContext: InboundContext,
+ associations: Set[Association])
extends Actor {
var remaining = Map.empty[UniqueAddress, Int]
@@ -414,14 +421,15 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
private val associationRegistry = new AssociationRegistry(
remoteAddress =>
- new Association(this,
- materializer,
- controlMaterializer,
- remoteAddress,
- controlSubject,
- settings.LargeMessageDestinations,
- priorityMessageDestinations,
- outboundEnvelopePool))
+ new Association(
+ this,
+ materializer,
+ controlMaterializer,
+ remoteAddress,
+ controlSubject,
+ settings.LargeMessageDestinations,
+ priorityMessageDestinations,
+ outboundEnvelopePool))
def remoteAddresses: Set[Address] = associationRegistry.allAssociations.map(_.remoteAddress)
@@ -446,12 +454,14 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
else ArteryTransport.autoSelectPort(settings.Bind.Hostname, udp)
} else settings.Bind.Port
- _localAddress = UniqueAddress(Address(ArteryTransport.ProtocolName, system.name, settings.Canonical.Hostname, port),
- AddressUidExtension(system).longAddressUid)
+ _localAddress = UniqueAddress(
+ Address(ArteryTransport.ProtocolName, system.name, settings.Canonical.Hostname, port),
+ AddressUidExtension(system).longAddressUid)
_addresses = Set(_localAddress.address)
- _bindAddress = UniqueAddress(Address(ArteryTransport.ProtocolName, system.name, settings.Bind.Hostname, bindPort),
- AddressUidExtension(system).longAddressUid)
+ _bindAddress = UniqueAddress(
+ Address(ArteryTransport.ProtocolName, system.name, settings.Bind.Hostname, bindPort),
+ AddressUidExtension(system).longAddressUid)
// TODO: This probably needs to be a global value instead of an event as events might rotate out of the log
topLevelFlightRecorder.loFreq(Transport_UniqueAddressSet, _localAddress.toString())
@@ -469,10 +479,11 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
startRemoveQuarantinedAssociationTask()
if (localAddress.address == bindAddress.address)
- log.info("Remoting started with transport [Artery {}]; listening on address [{}] with UID [{}]",
- settings.Transport,
- bindAddress.address,
- bindAddress.uid)
+ log.info(
+ "Remoting started with transport [Artery {}]; listening on address [{}] with UID [{}]",
+ settings.Transport,
+ bindAddress.address,
+ bindAddress.uid)
else {
log.info(
s"Remoting started with transport [Artery ${settings.Transport}]; listening on address [{}] and bound to [{}] with UID [{}]",
@@ -520,9 +531,10 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
// totalTimeout will be 0 when no tasks registered, so at least 3.seconds
val totalTimeout = coord.totalTimeout().max(3.seconds)
if (!coord.jvmHooksLatch.await(totalTimeout.toMillis, TimeUnit.MILLISECONDS))
- log.warning("CoordinatedShutdown took longer than [{}]. Shutting down [{}] via shutdownHook",
- totalTimeout,
- localAddress)
+ log.warning(
+ "CoordinatedShutdown took longer than [{}]. Shutting down [{}] via shutdownHook",
+ totalTimeout,
+ localAddress)
else
log.debug("Shutting down [{}] via shutdownHook", localAddress)
if (hasBeenShutdown.compareAndSet(false, true)) {
@@ -554,20 +566,22 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
}
}
} else
- log.debug("Discarding incoming ActorRef compression advertisement from [{}] that was " +
- "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]",
- from,
- table.originUid,
- localAddress.uid,
- table)
+ log.debug(
+ "Discarding incoming ActorRef compression advertisement from [{}] that was " +
+ "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]",
+ from,
+ table.originUid,
+ localAddress.uid,
+ table)
case ack: ActorRefCompressionAdvertisementAck =>
inboundCompressionAccess match {
case OptionVal.Some(access) => access.confirmActorRefCompressionAdvertisementAck(ack)
case _ =>
- log.debug(s"Received {} version: [{}] however no inbound compression access was present. " +
- s"ACK will not take effect, however it will be redelivered and likely to apply then.",
- Logging.simpleName(ack),
- ack.tableVersion)
+ log.debug(
+ s"Received {} version: [{}] however no inbound compression access was present. " +
+ s"ACK will not take effect, however it will be redelivered and likely to apply then.",
+ Logging.simpleName(ack),
+ ack.tableVersion)
}
case ClassManifestCompressionAdvertisement(from, table) =>
@@ -583,20 +597,22 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
}
}
} else
- log.debug("Discarding incoming Class Manifest compression advertisement from [{}] that was " +
- "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]",
- from,
- table.originUid,
- localAddress.uid,
- table)
+ log.debug(
+ "Discarding incoming Class Manifest compression advertisement from [{}] that was " +
+ "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]",
+ from,
+ table.originUid,
+ localAddress.uid,
+ table)
case ack: ClassManifestCompressionAdvertisementAck =>
inboundCompressionAccess match {
case OptionVal.Some(access) => access.confirmClassManifestCompressionAdvertisementAck(ack)
case _ =>
- log.debug(s"Received {} version: [{}] however no inbound compression access was present. " +
- s"ACK will not take effect, however it will be redelivered and likely to apply then.",
- Logging.simpleName(ack),
- ack.tableVersion)
+ log.debug(
+ s"Received {} version: [{}] however no inbound compression access was present. " +
+ s"ACK will not take effect, however it will be redelivered and likely to apply then.",
+ Logging.simpleName(ack),
+ ack.tableVersion)
}
}
@@ -620,9 +636,10 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
}
- protected def attachInboundStreamRestart(streamName: String,
- streamCompleted: Future[Done],
- restart: () => Unit): Unit = {
+ protected def attachInboundStreamRestart(
+ streamName: String,
+ streamCompleted: Future[Done],
+ restart: () => Unit): Unit = {
implicit val ec = materializer.executionContext
streamCompleted.failed.foreach {
case ShutdownSignal => // shutdown as expected
@@ -637,12 +654,13 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
topLevelFlightRecorder.loFreq(Transport_RestartInbound, s"$localAddress - $streamName")
restart()
} else {
- log.error(cause,
- "{} failed and restarted {} times within {} seconds. Terminating system. {}",
- streamName,
- settings.Advanced.InboundMaxRestarts,
- settings.Advanced.InboundRestartTimeout.toSeconds,
- cause.getMessage)
+ log.error(
+ cause,
+ "{} failed and restarted {} times within {} seconds. Terminating system. {}",
+ streamName,
+ settings.Advanced.InboundMaxRestarts,
+ settings.Advanced.InboundRestartTimeout.toSeconds,
+ cause.getMessage)
system.terminate()
}
}
@@ -802,28 +820,31 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
def outboundTransportSink(outboundContext: OutboundContext): Sink[EnvelopeBuffer, Future[Done]] =
outboundTransportSink(outboundContext, OrdinaryStreamId, envelopeBufferPool)
- protected def outboundTransportSink(outboundContext: OutboundContext,
- streamId: Int,
- bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]]
+ protected def outboundTransportSink(
+ outboundContext: OutboundContext,
+ streamId: Int,
+ bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]]
def outboundLane(
outboundContext: OutboundContext): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] =
outboundLane(outboundContext, envelopeBufferPool, OrdinaryStreamId)
- private def outboundLane(outboundContext: OutboundContext,
- bufferPool: EnvelopeBufferPool,
- streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = {
+ private def outboundLane(
+ outboundContext: OutboundContext,
+ bufferPool: EnvelopeBufferPool,
+ streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = {
Flow
.fromGraph(killSwitch.flow[OutboundEnvelope])
.via(
- new OutboundHandshake(system,
- outboundContext,
- outboundEnvelopePool,
- settings.Advanced.HandshakeTimeout,
- settings.Advanced.HandshakeRetryInterval,
- settings.Advanced.InjectHandshakeInterval,
- Duration.Undefined))
+ new OutboundHandshake(
+ system,
+ outboundContext,
+ outboundEnvelopePool,
+ settings.Advanced.HandshakeTimeout,
+ settings.Advanced.HandshakeRetryInterval,
+ settings.Advanced.InjectHandshakeInterval,
+ Duration.Undefined))
.viaMat(createEncoder(bufferPool, streamId))(Keep.right)
}
@@ -834,18 +855,20 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
Flow
.fromGraph(killSwitch.flow[OutboundEnvelope])
.via(
- new OutboundHandshake(system,
- outboundContext,
- outboundEnvelopePool,
- settings.Advanced.HandshakeTimeout,
- settings.Advanced.HandshakeRetryInterval,
- settings.Advanced.InjectHandshakeInterval,
- livenessProbeInterval))
+ new OutboundHandshake(
+ system,
+ outboundContext,
+ outboundEnvelopePool,
+ settings.Advanced.HandshakeTimeout,
+ settings.Advanced.HandshakeRetryInterval,
+ settings.Advanced.InjectHandshakeInterval,
+ livenessProbeInterval))
.via(
- new SystemMessageDelivery(outboundContext,
- system.deadLetters,
- settings.Advanced.SystemMessageResendInterval,
- settings.Advanced.SysMsgBufferSize))
+ new SystemMessageDelivery(
+ outboundContext,
+ system.deadLetters,
+ settings.Advanced.SystemMessageResendInterval,
+ settings.Advanced.SysMsgBufferSize))
// note that System messages must not be dropped before the SystemMessageDelivery stage
.via(outboundTestFlow(outboundContext))
.viaMat(new OutboundControlJunction(outboundContext, outboundEnvelopePool))(Keep.right)
@@ -855,8 +878,9 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr
// TODO we can also add scrubbing stage that would collapse sys msg acks/nacks and remove duplicate Quarantine messages
}
- def createEncoder(pool: EnvelopeBufferPool,
- streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] =
+ def createEncoder(
+ pool: EnvelopeBufferPool,
+ streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] =
Flow.fromGraph(
new Encoder(localAddress, system, outboundEnvelopePool, pool, streamId, settings.LogSend, settings.Version))
diff --git a/akka-remote/src/main/scala/akka/remote/artery/Association.scala b/akka-remote/src/main/scala/akka/remote/artery/Association.scala
index 288e65da9f..6c666c9c2c 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/Association.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/Association.scala
@@ -107,9 +107,10 @@ private[remote] object Association {
case object OutboundStreamStopIdleSignal extends RuntimeException("") with StopSignal with NoStackTrace
case object OutboundStreamStopQuarantinedSignal extends RuntimeException("") with StopSignal with NoStackTrace
- final case class OutboundStreamMatValues(streamKillSwitch: OptionVal[SharedKillSwitch],
- completed: Future[Done],
- stopping: OptionVal[StopSignal])
+ final case class OutboundStreamMatValues(
+ streamKillSwitch: OptionVal[SharedKillSwitch],
+ completed: Future[Done],
+ stopping: OptionVal[StopSignal])
}
/**
@@ -118,14 +119,15 @@ private[remote] object Association {
* Thread-safe, mutable holder for association state. Main entry point for remote destined message to a specific
* remote address.
*/
-private[remote] class Association(val transport: ArteryTransport,
- val materializer: Materializer,
- val controlMaterializer: Materializer,
- override val remoteAddress: Address,
- override val controlSubject: ControlMessageSubject,
- largeMessageDestinations: WildcardIndex[NotUsed],
- priorityMessageDestinations: WildcardIndex[NotUsed],
- outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope])
+private[remote] class Association(
+ val transport: ArteryTransport,
+ val materializer: Materializer,
+ val controlMaterializer: Materializer,
+ override val remoteAddress: Address,
+ override val controlSubject: ControlMessageSubject,
+ largeMessageDestinations: WildcardIndex[NotUsed],
+ priorityMessageDestinations: WildcardIndex[NotUsed],
+ outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope])
extends AbstractAssociation
with OutboundContext {
import Association._
@@ -260,8 +262,9 @@ private[remote] class Association(val transport: ArteryTransport,
}
def completeHandshake(peer: UniqueAddress): Future[Done] = {
- require(remoteAddress == peer.address,
- s"wrong remote address in completeHandshake, got ${peer.address}, expected $remoteAddress")
+ require(
+ remoteAddress == peer.address,
+ s"wrong remote address in completeHandshake, got ${peer.address}, expected $remoteAddress")
val current = associationState
current.uniqueRemoteAddressValue() match {
@@ -284,11 +287,12 @@ private[remote] class Association(val transport: ArteryTransport,
current.uniqueRemoteAddressValue() match {
case Some(old) =>
cancelStopQuarantinedTimer()
- log.debug("Incarnation {} of association to [{}] with new UID [{}] (old UID [{}])",
- newState.incarnation,
- peer.address,
- peer.uid,
- old.uid)
+ log.debug(
+ "Incarnation {} of association to [{}] with new UID [{}] (old UID [{}])",
+ newState.incarnation,
+ peer.address,
+ peer.uid,
+ old.uid)
clearInboundCompression(old.uid)
case None =>
// Failed, nothing to do
@@ -334,11 +338,12 @@ private[remote] class Association(val transport: ArteryTransport,
val reason =
if (removed) "removed unused quarantined association"
else s"overflow of send queue, size [$qSize]"
- log.debug("Dropping message [{}] from [{}] to [{}] due to {}",
- Logging.messageClassName(message),
- sender.getOrElse(deadletters),
- recipient.getOrElse(recipient),
- reason)
+ log.debug(
+ "Dropping message [{}] from [{}] to [{}] due to {}",
+ Logging.messageClassName(message),
+ sender.getOrElse(deadletters),
+ recipient.getOrElse(recipient),
+ reason)
}
flightRecorder.hiFreq(Transport_SendQueueOverflow, queueIndex)
deadletters ! env
@@ -351,9 +356,10 @@ private[remote] class Association(val transport: ArteryTransport,
// allow ActorSelectionMessage to pass through quarantine, to be able to establish interaction with new system
if (message.isInstanceOf[ActorSelectionMessage] || !quarantined || messageIsClearSystemMessageDelivery) {
if (quarantined && !messageIsClearSystemMessageDelivery) {
- log.debug("Quarantine piercing attempt with message [{}] to [{}]",
- Logging.messageClassName(message),
- recipient.getOrElse(""))
+ log.debug(
+ "Quarantine piercing attempt with message [{}] to [{}]",
+ Logging.messageClassName(message),
+ recipient.getOrElse(""))
setupStopQuarantinedTimer()
}
try {
@@ -387,11 +393,12 @@ private[remote] class Association(val transport: ArteryTransport,
case ShuttingDown => // silence it
}
} else if (log.isDebugEnabled)
- log.debug("Dropping message [{}] from [{}] to [{}] due to quarantined system [{}]",
- Logging.messageClassName(message),
- sender.getOrElse(deadletters),
- recipient.getOrElse(recipient),
- remoteAddress)
+ log.debug(
+ "Dropping message [{}] from [{}] to [{}] due to quarantined system [{}]",
+ Logging.messageClassName(message),
+ sender.getOrElse(deadletters),
+ recipient.getOrElse(recipient),
+ remoteAddress)
}
private def selectQueue(recipient: OptionVal[RemoteActorRef]): Int = {
@@ -472,11 +479,12 @@ private[remote] class Association(val transport: ArteryTransport,
if (swapState(current, newState)) {
// quarantine state change was performed
if (harmless) {
- log.info("Association to [{}] having UID [{}] has been stopped. All " +
- "messages to this UID will be delivered to dead letters. Reason: {}",
- remoteAddress,
- u,
- reason)
+ log.info(
+ "Association to [{}] having UID [{}] has been stopped. All " +
+ "messages to this UID will be delivered to dead letters. Reason: {}",
+ remoteAddress,
+ u,
+ reason)
transport.system.eventStream
.publish(GracefulShutdownQuarantinedEvent(UniqueAddress(remoteAddress, u), reason))
} else {
@@ -704,11 +712,12 @@ private[remote] class Association(val transport: ArteryTransport,
updateStreamMatValues(ControlQueueIndex, streamKillSwitch, completed)
setupIdleTimer()
- attachOutboundStreamRestart("Outbound control stream",
- ControlQueueIndex,
- controlQueueSize,
- completed,
- () => runOutboundControlStream())
+ attachOutboundStreamRestart(
+ "Outbound control stream",
+ ControlQueueIndex,
+ controlQueueSize,
+ completed,
+ () => runOutboundControlStream())
}
private def getOrCreateQueueWrapper(queueIndex: Int, capacity: Int): QueueWrapper = {
@@ -748,11 +757,12 @@ private[remote] class Association(val transport: ArteryTransport,
outboundCompressionAccess = Vector(changeCompression)
updateStreamMatValues(OrdinaryQueueIndex, streamKillSwitch, completed)
- attachOutboundStreamRestart("Outbound message stream",
- OrdinaryQueueIndex,
- queueSize,
- completed,
- () => runOutboundOrdinaryMessagesStream())
+ attachOutboundStreamRestart(
+ "Outbound message stream",
+ OrdinaryQueueIndex,
+ queueSize,
+ completed,
+ () => runOutboundOrdinaryMessagesStream())
} else {
log.debug("Starting outbound message stream to [{}] with [{}] lanes", remoteAddress, outboundLanes)
@@ -811,11 +821,12 @@ private[remote] class Association(val transport: ArteryTransport,
outboundCompressionAccess = compressionAccessValues
- attachOutboundStreamRestart("Outbound message stream",
- OrdinaryQueueIndex,
- queueSize,
- allCompleted,
- () => runOutboundOrdinaryMessagesStream())
+ attachOutboundStreamRestart(
+ "Outbound message stream",
+ OrdinaryQueueIndex,
+ queueSize,
+ allCompleted,
+ () => runOutboundOrdinaryMessagesStream())
}
}
@@ -841,18 +852,20 @@ private[remote] class Association(val transport: ArteryTransport,
queuesVisibility = true // volatile write for visibility of the queues array
updateStreamMatValues(LargeQueueIndex, streamKillSwitch, completed)
- attachOutboundStreamRestart("Outbound large message stream",
- LargeQueueIndex,
- largeQueueSize,
- completed,
- () => runOutboundLargeMessagesStream())
+ attachOutboundStreamRestart(
+ "Outbound large message stream",
+ LargeQueueIndex,
+ largeQueueSize,
+ completed,
+ () => runOutboundLargeMessagesStream())
}
- private def attachOutboundStreamRestart(streamName: String,
- queueIndex: Int,
- queueCapacity: Int,
- streamCompleted: Future[Done],
- restart: () => Unit): Unit = {
+ private def attachOutboundStreamRestart(
+ streamName: String,
+ queueIndex: Int,
+ queueCapacity: Int,
+ streamCompleted: Future[Done],
+ restart: () => Unit): Unit = {
def lazyRestart(): Unit = {
flightRecorder.loFreq(Transport_RestartOutbound, s"$remoteAddress - $streamName")
@@ -926,29 +939,32 @@ private[remote] class Association(val transport: ArteryTransport,
log.debug("{} to [{}] was idle and stopped. It will be restarted if used again.", streamName, remoteAddress)
lazyRestart()
} else if (stoppedQuarantined) {
- log.debug("{} to [{}] was quarantined and stopped. It will be restarted if used again.",
- streamName,
- remoteAddress)
+ log.debug(
+ "{} to [{}] was quarantined and stopped. It will be restarted if used again.",
+ streamName,
+ remoteAddress)
lazyRestart()
} else if (bypassRestartCounter || restartCounter.restart()) {
log.error(cause, "{} to [{}] failed. Restarting it. {}", streamName, remoteAddress, cause.getMessage)
lazyRestart()
} else {
- log.error(cause,
- s"{} to [{}] failed and restarted {} times within {} seconds. Terminating system. ${cause.getMessage}",
- streamName,
- remoteAddress,
- advancedSettings.OutboundMaxRestarts,
- advancedSettings.OutboundRestartTimeout.toSeconds)
+ log.error(
+ cause,
+ s"{} to [{}] failed and restarted {} times within {} seconds. Terminating system. ${cause.getMessage}",
+ streamName,
+ remoteAddress,
+ advancedSettings.OutboundMaxRestarts,
+ advancedSettings.OutboundRestartTimeout.toSeconds)
cancelAllTimers()
transport.system.terminate()
}
}
}
- private def updateStreamMatValues(streamId: Int,
- streamKillSwitch: SharedKillSwitch,
- completed: Future[Done]): Unit = {
+ private def updateStreamMatValues(
+ streamId: Int,
+ streamKillSwitch: SharedKillSwitch,
+ completed: Future[Done]): Unit = {
implicit val ec = materializer.executionContext
updateStreamMatValues(streamId, OutboundStreamMatValues(OptionVal.Some(streamKillSwitch), completed.recover {
case _ => Done
diff --git a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala
index e3287a9629..48dc03ba41 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala
@@ -43,15 +43,17 @@ private[remote] object Encoder {
/**
* INTERNAL API
*/
-private[remote] class Encoder(uniqueLocalAddress: UniqueAddress,
- system: ExtendedActorSystem,
- outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope],
- bufferPool: EnvelopeBufferPool,
- @unused streamId: Int,
- debugLogSend: Boolean,
- version: Byte)
- extends GraphStageWithMaterializedValue[FlowShape[OutboundEnvelope, EnvelopeBuffer],
- Encoder.OutboundCompressionAccess] {
+private[remote] class Encoder(
+ uniqueLocalAddress: UniqueAddress,
+ system: ExtendedActorSystem,
+ outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope],
+ bufferPool: EnvelopeBufferPool,
+ @unused streamId: Int,
+ debugLogSend: Boolean,
+ version: Byte)
+ extends GraphStageWithMaterializedValue[
+ FlowShape[OutboundEnvelope, EnvelopeBuffer],
+ Encoder.OutboundCompressionAccess] {
import Encoder._
val in: Inlet[OutboundEnvelope] = Inlet("Artery.Encoder.in")
@@ -141,10 +143,11 @@ private[remote] class Encoder(uniqueLocalAddress: UniqueAddress,
envelope.byteBuffer.flip()
if (debugLogSendEnabled)
- log.debug("sending remote message [{}] to [{}] from [{}]",
- outboundEnvelope.message,
- outboundEnvelope.recipient.getOrElse(""),
- outboundEnvelope.sender.getOrElse(""))
+ log.debug(
+ "sending remote message [{}] to [{}] from [{}]",
+ outboundEnvelope.message,
+ outboundEnvelope.recipient.getOrElse(""),
+ outboundEnvelope.sender.getOrElse(""))
push(out, envelope)
@@ -153,18 +156,20 @@ private[remote] class Encoder(uniqueLocalAddress: UniqueAddress,
bufferPool.release(envelope)
outboundEnvelope.message match {
case _: SystemMessageEnvelope =>
- log.error(e,
- "Failed to serialize system message [{}].",
- Logging.messageClassName(outboundEnvelope.message))
+ log.error(
+ e,
+ "Failed to serialize system message [{}].",
+ Logging.messageClassName(outboundEnvelope.message))
throw e
case _ if e.isInstanceOf[java.nio.BufferOverflowException] =>
val reason = new OversizedPayloadException(
"Discarding oversized payload sent to " +
s"${outboundEnvelope.recipient}: max allowed size ${envelope.byteBuffer.limit()} " +
s"bytes. Message type [${Logging.messageClassName(outboundEnvelope.message)}].")
- log.error(reason,
- "Failed to serialize oversized message [{}].",
- Logging.messageClassName(outboundEnvelope.message))
+ log.error(
+ reason,
+ "Failed to serialize oversized message [{}].",
+ Logging.messageClassName(outboundEnvelope.message))
pull(in)
case _ =>
log.error(e, "Failed to serialize message [{}].", Logging.messageClassName(outboundEnvelope.message))
@@ -210,9 +215,10 @@ private[remote] class Encoder(uniqueLocalAddress: UniqueAddress,
* INTERNAL API
*/
private[remote] object Decoder {
- private final case class RetryResolveRemoteDeployedRecipient(attemptsLeft: Int,
- recipientPath: String,
- inboundEnvelope: InboundEnvelope)
+ private final case class RetryResolveRemoteDeployedRecipient(
+ attemptsLeft: Int,
+ recipientPath: String,
+ inboundEnvelope: InboundEnvelope)
private object Tick
@@ -310,8 +316,9 @@ private[remote] object Decoder {
/**
* INTERNAL API
*/
-private[remote] final class ActorRefResolveCacheWithAddress(provider: RemoteActorRefProvider,
- localAddress: UniqueAddress)
+private[remote] final class ActorRefResolveCacheWithAddress(
+ provider: RemoteActorRefProvider,
+ localAddress: UniqueAddress)
extends LruBoundedCache[String, InternalActorRef](capacity = 1024, evictAgeThreshold = 600) {
override protected def compute(k: String): InternalActorRef =
@@ -325,12 +332,13 @@ private[remote] final class ActorRefResolveCacheWithAddress(provider: RemoteActo
/**
* INTERNAL API
*/
-private[remote] class Decoder(inboundContext: InboundContext,
- system: ExtendedActorSystem,
- uniqueLocalAddress: UniqueAddress,
- settings: ArterySettings,
- inboundCompressions: InboundCompressions,
- inEnvelopePool: ObjectPool[ReusableInboundEnvelope])
+private[remote] class Decoder(
+ inboundContext: InboundContext,
+ system: ExtendedActorSystem,
+ uniqueLocalAddress: UniqueAddress,
+ settings: ArterySettings,
+ inboundCompressions: InboundCompressions,
+ inEnvelopePool: ObjectPool[ReusableInboundEnvelope])
extends GraphStageWithMaterializedValue[FlowShape[EnvelopeBuffer, InboundEnvelope], InboundCompressionAccess] {
import Decoder.Tick
@@ -472,15 +480,16 @@ private[remote] class Decoder(inboundContext: InboundContext,
val decoded = inEnvelopePool
.acquire()
- .init(recipient,
- sender,
- originUid,
- headerBuilder.serializer,
- classManifest,
- headerBuilder.flags,
- envelope,
- association,
- lane = 0)
+ .init(
+ recipient,
+ sender,
+ originUid,
+ headerBuilder.serializer,
+ classManifest,
+ headerBuilder.flags,
+ envelope,
+ association,
+ lane = 0)
if (recipient.isEmpty && !headerBuilder.isNoRecipient) {
@@ -497,20 +506,23 @@ private[remote] class Decoder(inboundContext: InboundContext,
case OptionVal.Some(path) =>
val ref = actorRefResolver.getOrCompute(path)
if (ref.isInstanceOf[EmptyLocalActorRef])
- log.warning("Message for banned (terminated, unresolved) remote deployed recipient [{}].",
- recipientActorRefPath)
+ log.warning(
+ "Message for banned (terminated, unresolved) remote deployed recipient [{}].",
+ recipientActorRefPath)
push(out, decoded.withRecipient(ref))
case OptionVal.None =>
- log.warning("Dropping message for banned (terminated, unresolved) remote deployed recipient [{}].",
- recipientActorRefPath)
+ log.warning(
+ "Dropping message for banned (terminated, unresolved) remote deployed recipient [{}].",
+ recipientActorRefPath)
pull(in)
}
} else
scheduleOnce(
- RetryResolveRemoteDeployedRecipient(retryResolveRemoteDeployedRecipientAttempts,
- recipientActorRefPath,
- decoded),
+ RetryResolveRemoteDeployedRecipient(
+ retryResolveRemoteDeployedRecipientAttempts,
+ recipientActorRefPath,
+ decoded),
retryResolveRemoteDeployedRecipientInterval)
} else {
push(out, decoded)
@@ -565,8 +577,9 @@ private[remote] class Decoder(inboundContext: InboundContext,
resolveRecipient(recipientPath) match {
case OptionVal.None =>
if (attemptsLeft > 0)
- scheduleOnce(RetryResolveRemoteDeployedRecipient(attemptsLeft - 1, recipientPath, inboundEnvelope),
- retryResolveRemoteDeployedRecipientInterval)
+ scheduleOnce(
+ RetryResolveRemoteDeployedRecipient(attemptsLeft - 1, recipientPath, inboundEnvelope),
+ retryResolveRemoteDeployedRecipientInterval)
else {
// No more attempts left. If the retried resolve isn't successful the ref is banned and
// we will not do the delayed retry resolve again. The reason for that is
@@ -598,9 +611,10 @@ private[remote] class Decoder(inboundContext: InboundContext,
/**
* INTERNAL API
*/
-private[remote] class Deserializer(@unused inboundContext: InboundContext,
- system: ExtendedActorSystem,
- bufferPool: EnvelopeBufferPool)
+private[remote] class Deserializer(
+ @unused inboundContext: InboundContext,
+ system: ExtendedActorSystem,
+ bufferPool: EnvelopeBufferPool)
extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] {
val in: Inlet[InboundEnvelope] = Inlet("Artery.Deserializer.in")
@@ -629,12 +643,13 @@ private[remote] class Deserializer(@unused inboundContext: InboundContext,
try {
val startTime: Long = if (instruments.timeSerialization) System.nanoTime else 0
- val deserializedMessage = MessageSerializer.deserializeForArtery(system,
- envelope.originUid,
- serialization,
- envelope.serializer,
- envelope.classManifest,
- envelope.envelopeBuffer)
+ val deserializedMessage = MessageSerializer.deserializeForArtery(
+ system,
+ envelope.originUid,
+ serialization,
+ envelope.serializer,
+ envelope.classManifest,
+ envelope.envelopeBuffer)
val envelopeWithMessage = envelope.withMessage(deserializedMessage)
@@ -650,11 +665,12 @@ private[remote] class Deserializer(@unused inboundContext: InboundContext,
case OptionVal.Some(a) => a.remoteAddress
case OptionVal.None => "unknown"
}
- log.warning("Failed to deserialize message from [{}] with serializer id [{}] and manifest [{}]. {}",
- from,
- envelope.serializer,
- envelope.classManifest,
- e)
+ log.warning(
+ "Failed to deserialize message from [{}] with serializer id [{}] and manifest [{}]. {}",
+ from,
+ envelope.serializer,
+ envelope.classManifest,
+ e)
pull(in)
} finally {
val buf = envelope.envelopeBuffer
@@ -675,10 +691,11 @@ private[remote] class Deserializer(@unused inboundContext: InboundContext,
* that an application message arrives in the InboundHandshake operator before the
* handshake is completed and then it would be dropped.
*/
-private[remote] class DuplicateHandshakeReq(numberOfLanes: Int,
- inboundContext: InboundContext,
- system: ExtendedActorSystem,
- bufferPool: EnvelopeBufferPool)
+private[remote] class DuplicateHandshakeReq(
+ numberOfLanes: Int,
+ inboundContext: InboundContext,
+ system: ExtendedActorSystem,
+ bufferPool: EnvelopeBufferPool)
extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] {
val in: Inlet[InboundEnvelope] = Inlet("Artery.DuplicateHandshakeReq.in")
diff --git a/akka-remote/src/main/scala/akka/remote/artery/Control.scala b/akka-remote/src/main/scala/akka/remote/artery/Control.scala
index 96c70d1b9f..02b6301ce7 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/Control.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/Control.scala
@@ -88,8 +88,9 @@ private[remote] object InboundControlJunction {
* INTERNAL API
*/
private[remote] class InboundControlJunction
- extends GraphStageWithMaterializedValue[FlowShape[InboundEnvelope, InboundEnvelope],
- InboundControlJunction.ControlMessageSubject] {
+ extends GraphStageWithMaterializedValue[
+ FlowShape[InboundEnvelope, InboundEnvelope],
+ InboundControlJunction.ControlMessageSubject] {
import InboundControlJunction._
val in: Inlet[InboundEnvelope] = Inlet("InboundControlJunction.in")
@@ -158,10 +159,12 @@ private[remote] object OutboundControlJunction {
/**
* INTERNAL API
*/
-private[remote] class OutboundControlJunction(outboundContext: OutboundContext,
- outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope])
- extends GraphStageWithMaterializedValue[FlowShape[OutboundEnvelope, OutboundEnvelope],
- OutboundControlJunction.OutboundControlIngress] {
+private[remote] class OutboundControlJunction(
+ outboundContext: OutboundContext,
+ outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope])
+ extends GraphStageWithMaterializedValue[
+ FlowShape[OutboundEnvelope, OutboundEnvelope],
+ OutboundControlJunction.OutboundControlIngress] {
import OutboundControlJunction._
val in: Inlet[OutboundEnvelope] = Inlet("OutboundControlJunction.in")
val out: Outlet[OutboundEnvelope] = Outlet("OutboundControlJunction.out")
diff --git a/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala b/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala
index 2fb31ccae9..0602dc91ce 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala
@@ -215,9 +215,10 @@ private[remote] final class SerializationFormatCache
/**
* INTERNAL API
*/
-private[remote] final class HeaderBuilderImpl(inboundCompression: InboundCompressions,
- var _outboundActorRefCompression: CompressionTable[ActorRef],
- var _outboundClassManifestCompression: CompressionTable[String])
+private[remote] final class HeaderBuilderImpl(
+ inboundCompression: InboundCompressions,
+ var _outboundActorRefCompression: CompressionTable[ActorRef],
+ var _outboundClassManifestCompression: CompressionTable[String])
extends HeaderBuilder {
import HeaderBuilder.DeadLettersCode
diff --git a/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala
index 1553ae8ee5..90ae290f6f 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala
@@ -13,12 +13,11 @@ import org.agrona.concurrent.OneToOneConcurrentArrayQueue
*/
@InternalApi private[akka] class FixedSizePartitionHub[T](partitioner: T => Int, lanes: Int, bufferSize: Int)
extends PartitionHub[T](
- // during tear down or restart it's possible that some streams have been removed
- // and then we must drop elements (return -1)
- () =>
- (info, elem) => if (info.size < lanes) -1 else info.consumerIdByIdx(partitioner(elem)),
- lanes,
- bufferSize - 1) {
+ // during tear down or restart it's possible that some streams have been removed
+ // and then we must drop elements (return -1)
+ () => (info, elem) => if (info.size < lanes) -1 else info.consumerIdByIdx(partitioner(elem)),
+ lanes,
+ bufferSize - 1) {
// -1 because of the Completed token
override def createQueue(): PartitionHub.Internal.PartitionQueue =
diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala
index 4e2e3d2a6c..9bf3e5f8f0 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala
@@ -137,11 +137,12 @@ private[remote] object RollingEventLogSection {
/**
* INTERNAL API
*/
-private[remote] class RollingEventLogSection(fileChannel: FileChannel,
- offset: Long,
- entryCount: Long,
- logBufferSize: Long,
- recordSize: Int) {
+private[remote] class RollingEventLogSection(
+ fileChannel: FileChannel,
+ offset: Long,
+ entryCount: Long,
+ logBufferSize: Long,
+ recordSize: Int) {
import RollingEventLogSection._
require(entryCount > 0, "entryCount must be greater than 0")
@@ -294,23 +295,26 @@ private[remote] class FlightRecorder(val fileChannel: FileChannel)
require((SnapshotCount & (SnapshotCount - 1)) == 0, "SnapshotCount must be power of two")
private[this] val SnapshotMask = SnapshotCount - 1
private[this] val alertLogs =
- new RollingEventLogSection(fileChannel = fileChannel,
- offset = AlertSectionOffset,
- entryCount = AlertWindow,
- logBufferSize = AlertLogSize,
- recordSize = AlertRecordSize)
+ new RollingEventLogSection(
+ fileChannel = fileChannel,
+ offset = AlertSectionOffset,
+ entryCount = AlertWindow,
+ logBufferSize = AlertLogSize,
+ recordSize = AlertRecordSize)
private[this] val loFreqLogs =
- new RollingEventLogSection(fileChannel = fileChannel,
- offset = LoFreqSectionOffset,
- entryCount = LoFreqWindow,
- logBufferSize = LoFreqLogSize,
- recordSize = LoFreqRecordSize)
+ new RollingEventLogSection(
+ fileChannel = fileChannel,
+ offset = LoFreqSectionOffset,
+ entryCount = LoFreqWindow,
+ logBufferSize = LoFreqLogSize,
+ recordSize = LoFreqRecordSize)
private[this] val hiFreqLogs =
- new RollingEventLogSection(fileChannel = fileChannel,
- offset = HiFreqSectionOffset,
- entryCount = HiFreqWindow,
- logBufferSize = HiFreqLogSize,
- recordSize = HiFreqRecordSize)
+ new RollingEventLogSection(
+ fileChannel = fileChannel,
+ offset = HiFreqSectionOffset,
+ entryCount = HiFreqWindow,
+ logBufferSize = HiFreqLogSize,
+ recordSize = HiFreqRecordSize)
// No need for volatile, guarded by atomic CAS and set
@volatile private var currentLog = 0
diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala
index f7a03c4b3d..17ffb29c07 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala
@@ -69,56 +69,57 @@ private[remote] object FlightRecorderEvents {
val TcpInbound_Received = 173
// Used for presentation of the entries in the flight recorder
- lazy val eventDictionary = Map(Transport_MediaDriverStarted -> "Transport: Media driver started",
- Transport_Started -> "Transport: started",
- Transport_AeronErrorLogStarted -> "Transport: Aeron error log started",
- Transport_TaskRunnerStarted -> "Transport: Task runner started",
- Transport_UniqueAddressSet -> "Transport: Unique address set",
- Transport_MaterializerStarted -> "Transport: Materializer started",
- Transport_StartupFinished -> "Transport: Startup finished",
- Transport_OnAvailableImage -> "Transport: onAvailableImage",
- Transport_KillSwitchPulled -> "Transport: KillSwitch pulled",
- Transport_Stopped -> "Transport: Stopped",
- Transport_AeronErrorLogTaskStopped -> "Transport: Aeron errorLog task stopped",
- Transport_MediaFileDeleted -> "Transport: Media file deleted",
- Transport_FlightRecorderClose -> "Transport: Flight recorder closed",
- Transport_SendQueueOverflow -> "Transport: Send queue overflow",
- Transport_StopIdleOutbound -> "Transport: Remove idle outbound",
- Transport_Quarantined -> "Transport: Quarantined association",
- Transport_RemovedQuarantined -> "Transport: Removed idle quarantined association",
- Transport_RestartOutbound -> "Transport: Restart outbound",
- Transport_RestartInbound -> "Transport: Restart outbound",
- // Aeron Sink events
- AeronSink_Started -> "AeronSink: Started",
- AeronSink_TaskRunnerRemoved -> "AeronSink: Task runner removed",
- AeronSink_PublicationClosed -> "AeronSink: Publication closed",
- AeronSink_Stopped -> "AeronSink: Stopped",
- AeronSink_EnvelopeGrabbed -> "AeronSink: Envelope grabbed",
- AeronSink_EnvelopeOffered -> "AeronSink: Envelope offered",
- AeronSink_GaveUpEnvelope -> "AeronSink: Gave up envelope",
- AeronSink_DelegateToTaskRunner -> "AeronSink: Delegate to task runner",
- AeronSink_ReturnFromTaskRunner -> "AeronSink: Return from task runner",
- // Aeron Source events
- AeronSource_Started -> "AeronSource: Started",
- AeronSource_Stopped -> "AeronSource: Stopped",
- AeronSource_Received -> "AeronSource: Received",
- AeronSource_DelegateToTaskRunner -> "AeronSource: Delegate to task runner",
- AeronSource_ReturnFromTaskRunner -> "AeronSource: Return from task runner",
- // Compression events
- Compression_CompressedActorRef -> "Compression: Compressed ActorRef",
- Compression_AllocatedActorRefCompressionId -> "Compression: Allocated ActorRef compression id",
- Compression_CompressedManifest -> "Compression: Compressed manifest",
- Compression_AllocatedManifestCompressionId -> "Compression: Allocated manifest compression id",
- Compression_Inbound_RunActorRefAdvertisement -> "InboundCompression: Run class manifest compression advertisement",
- Compression_Inbound_RunClassManifestAdvertisement -> "InboundCompression: Run class manifest compression advertisement",
- // TCP outbound events
- TcpOutbound_Connected -> "TCP out: Connected",
- TcpOutbound_Sent -> "TCP out: Sent message",
- // TCP inbound events
- TcpInbound_Bound -> "TCP in: Bound",
- TcpInbound_Unbound -> "TCP in: Unbound",
- TcpInbound_Connected -> "TCP in: New connection",
- TcpInbound_Received -> "TCP in: Received message").map {
+ lazy val eventDictionary = Map(
+ Transport_MediaDriverStarted -> "Transport: Media driver started",
+ Transport_Started -> "Transport: started",
+ Transport_AeronErrorLogStarted -> "Transport: Aeron error log started",
+ Transport_TaskRunnerStarted -> "Transport: Task runner started",
+ Transport_UniqueAddressSet -> "Transport: Unique address set",
+ Transport_MaterializerStarted -> "Transport: Materializer started",
+ Transport_StartupFinished -> "Transport: Startup finished",
+ Transport_OnAvailableImage -> "Transport: onAvailableImage",
+ Transport_KillSwitchPulled -> "Transport: KillSwitch pulled",
+ Transport_Stopped -> "Transport: Stopped",
+ Transport_AeronErrorLogTaskStopped -> "Transport: Aeron errorLog task stopped",
+ Transport_MediaFileDeleted -> "Transport: Media file deleted",
+ Transport_FlightRecorderClose -> "Transport: Flight recorder closed",
+ Transport_SendQueueOverflow -> "Transport: Send queue overflow",
+ Transport_StopIdleOutbound -> "Transport: Remove idle outbound",
+ Transport_Quarantined -> "Transport: Quarantined association",
+ Transport_RemovedQuarantined -> "Transport: Removed idle quarantined association",
+ Transport_RestartOutbound -> "Transport: Restart outbound",
+ Transport_RestartInbound -> "Transport: Restart inbound",
+ // Aeron Sink events
+ AeronSink_Started -> "AeronSink: Started",
+ AeronSink_TaskRunnerRemoved -> "AeronSink: Task runner removed",
+ AeronSink_PublicationClosed -> "AeronSink: Publication closed",
+ AeronSink_Stopped -> "AeronSink: Stopped",
+ AeronSink_EnvelopeGrabbed -> "AeronSink: Envelope grabbed",
+ AeronSink_EnvelopeOffered -> "AeronSink: Envelope offered",
+ AeronSink_GaveUpEnvelope -> "AeronSink: Gave up envelope",
+ AeronSink_DelegateToTaskRunner -> "AeronSink: Delegate to task runner",
+ AeronSink_ReturnFromTaskRunner -> "AeronSink: Return from task runner",
+ // Aeron Source events
+ AeronSource_Started -> "AeronSource: Started",
+ AeronSource_Stopped -> "AeronSource: Stopped",
+ AeronSource_Received -> "AeronSource: Received",
+ AeronSource_DelegateToTaskRunner -> "AeronSource: Delegate to task runner",
+ AeronSource_ReturnFromTaskRunner -> "AeronSource: Return from task runner",
+ // Compression events
+ Compression_CompressedActorRef -> "Compression: Compressed ActorRef",
+ Compression_AllocatedActorRefCompressionId -> "Compression: Allocated ActorRef compression id",
+ Compression_CompressedManifest -> "Compression: Compressed manifest",
+ Compression_AllocatedManifestCompressionId -> "Compression: Allocated manifest compression id",
+ Compression_Inbound_RunActorRefAdvertisement -> "InboundCompression: Run actor ref compression advertisement",
+ Compression_Inbound_RunClassManifestAdvertisement -> "InboundCompression: Run class manifest compression advertisement",
+ // TCP outbound events
+ TcpOutbound_Connected -> "TCP out: Connected",
+ TcpOutbound_Sent -> "TCP out: Sent message",
+ // TCP inbound events
+ TcpInbound_Bound -> "TCP in: Bound",
+ TcpInbound_Unbound -> "TCP in: Unbound",
+ TcpInbound_Connected -> "TCP in: New connection",
+ TcpInbound_Received -> "TCP in: Received message").map {
case (int, str) => int.toLong -> str
}
diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala
index f7a9a9fea1..fc03849127 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala
@@ -35,12 +35,13 @@ private[akka] object FlightRecorderReader {
case object Live extends LogState
case object Snapshot extends LogState
- case class SectionParameters(offset: Long,
- sectionSize: Long,
- logSize: Long,
- window: Long,
- recordSize: Long,
- entriesPerRecord: Long) {
+ case class SectionParameters(
+ offset: Long,
+ sectionSize: Long,
+ logSize: Long,
+ window: Long,
+ recordSize: Long,
+ entriesPerRecord: Long) {
override def toString: String =
s"""
| offset = $offset
@@ -53,26 +54,29 @@ private[akka] object FlightRecorderReader {
""".stripMargin
}
- val AlertSectionParameters = SectionParameters(offset = AlertSectionOffset,
- sectionSize = AlertSectionSize,
- logSize = AlertLogSize,
- window = AlertWindow,
- recordSize = AlertRecordSize,
- entriesPerRecord = 1)
+ val AlertSectionParameters = SectionParameters(
+ offset = AlertSectionOffset,
+ sectionSize = AlertSectionSize,
+ logSize = AlertLogSize,
+ window = AlertWindow,
+ recordSize = AlertRecordSize,
+ entriesPerRecord = 1)
- val LoFreqSectionParameters = SectionParameters(offset = LoFreqSectionOffset,
- sectionSize = LoFreqSectionSize,
- logSize = LoFreqLogSize,
- window = LoFreqWindow,
- recordSize = LoFreqRecordSize,
- entriesPerRecord = 1)
+ val LoFreqSectionParameters = SectionParameters(
+ offset = LoFreqSectionOffset,
+ sectionSize = LoFreqSectionSize,
+ logSize = LoFreqLogSize,
+ window = LoFreqWindow,
+ recordSize = LoFreqRecordSize,
+ entriesPerRecord = 1)
- val HiFreqSectionParameters = SectionParameters(offset = HiFreqSectionOffset,
- sectionSize = HiFreqSectionSize,
- logSize = HiFreqLogSize,
- window = HiFreqWindow,
- recordSize = HiFreqRecordSize,
- entriesPerRecord = HiFreqBatchSize)
+ val HiFreqSectionParameters = SectionParameters(
+ offset = HiFreqSectionOffset,
+ sectionSize = HiFreqSectionSize,
+ logSize = HiFreqLogSize,
+ window = HiFreqWindow,
+ recordSize = HiFreqRecordSize,
+ entriesPerRecord = HiFreqBatchSize)
def dumpToStdout(flightRecorderFile: Path): Unit = {
var raFile: RandomAccessFile = null
@@ -189,10 +193,11 @@ private[akka] final class FlightRecorderReader(fileChannel: FileChannel) {
override def next(): CompactEntry = {
if (entriesLeft == -1L) readHeader()
- val entry = CompactEntry(timeStamp,
- dirty,
- code = fileBuffer.getLong(entryOffset),
- param = fileBuffer.getLong(entryOffset + 8))
+ val entry = CompactEntry(
+ timeStamp,
+ dirty,
+ code = fileBuffer.getLong(entryOffset),
+ param = fileBuffer.getLong(entryOffset + 8))
entriesLeft -= 1
if (entriesLeft == 0) {
diff --git a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala
index 25c6dcad9d..6afb1d14db 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala
@@ -48,13 +48,14 @@ private[remote] object OutboundHandshake {
/**
* INTERNAL API
*/
-private[remote] class OutboundHandshake(@unused system: ActorSystem,
- outboundContext: OutboundContext,
- outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope],
- timeout: FiniteDuration,
- retryInterval: FiniteDuration,
- injectHandshakeInterval: FiniteDuration,
- livenessProbeInterval: Duration)
+private[remote] class OutboundHandshake(
+ @unused system: ActorSystem,
+ outboundContext: OutboundContext,
+ outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope],
+ timeout: FiniteDuration,
+ retryInterval: FiniteDuration,
+ injectHandshakeInterval: FiniteDuration,
+ livenessProbeInterval: Duration)
extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] {
val in: Inlet[OutboundEnvelope] = Inlet("OutboundHandshake.in")
@@ -159,9 +160,10 @@ private[remote] class OutboundHandshake(@unused system: ActorSystem,
if (handshakeState == Completed && isAvailable(out) && pendingMessage.isEmpty) {
val lastUsedDuration = (System.nanoTime() - outboundContext.associationState.lastUsedTimestamp.get()).nanos
if (lastUsedDuration >= livenessProbeInterval) {
- log.info("Association to [{}] has been idle for [{}] seconds, sending HandshakeReq to validate liveness",
- outboundContext.remoteAddress,
- lastUsedDuration.toSeconds)
+ log.info(
+ "Association to [{}] has been idle for [{}] seconds, sending HandshakeReq to validate liveness",
+ outboundContext.remoteAddress,
+ lastUsedDuration.toSeconds)
push(out, createHandshakeReqEnvelope())
}
}
@@ -170,9 +172,10 @@ private[remote] class OutboundHandshake(@unused system: ActorSystem,
private def createHandshakeReqEnvelope(): OutboundEnvelope = {
outboundEnvelopePool
.acquire()
- .init(recipient = OptionVal.None,
- message = HandshakeReq(outboundContext.localAddress, outboundContext.remoteAddress),
- sender = OptionVal.None)
+ .init(
+ recipient = OptionVal.None,
+ message = HandshakeReq(outboundContext.localAddress, outboundContext.remoteAddress),
+ sender = OptionVal.None)
}
private def handshakeCompleted(): Unit = {
@@ -217,30 +220,27 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl
// InHandler
if (inControlStream)
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = {
- val env = grab(in)
- env.message match {
- case HandshakeReq(from, to) => onHandshakeReq(from, to)
- case HandshakeRsp(from) =>
- // Touch the lastUsedTimestamp here also because when sending the extra low frequency HandshakeRsp
- // the timestamp is not supposed to be updated when sending but when receiving reply, which confirms
- // that the other system is alive.
- inboundContext
- .association(from.address)
- .associationState
- .lastUsedTimestamp
- .set(System.nanoTime())
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush(): Unit = {
+ val env = grab(in)
+ env.message match {
+ case HandshakeReq(from, to) => onHandshakeReq(from, to)
+ case HandshakeRsp(from) =>
+ // Touch the lastUsedTimestamp here also because when sending the extra low frequency HandshakeRsp
+ // the timestamp is not supposed to be updated when sending but when receiving reply, which confirms
+ // that the other system is alive.
+ inboundContext.association(from.address).associationState.lastUsedTimestamp.set(System.nanoTime())
- after(inboundContext.completeHandshake(from)) {
- pull(in)
- }
- case _ =>
- onMessage(env)
- }
- }
- })
+ after(inboundContext.completeHandshake(from)) {
+ pull(in)
+ }
+ case _ =>
+ onMessage(env)
+ }
+ }
+ })
else
setHandler(in, new InHandler {
override def onPush(): Unit = {
@@ -260,14 +260,15 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl
pull(in)
}
} else {
- log.warning("Dropping Handshake Request from [{}] addressed to unknown local address [{}]. " +
- "Local address is [{}]. Check that the sending system uses the same " +
- "address to contact recipient system as defined in the " +
- "'akka.remote.artery.canonical.hostname' of the recipient system. " +
- "The name of the ActorSystem must also match.",
- from,
- to,
- inboundContext.localAddress.address)
+ log.warning(
+ "Dropping Handshake Request from [{}] addressed to unknown local address [{}]. " +
+ "Local address is [{}]. Check that the sending system uses the same " +
+ "address to contact recipient system as defined in the " +
+ "'akka.remote.artery.canonical.hostname' of the recipient system. " +
+ "The name of the ActorSystem must also match.",
+ from,
+ to,
+ inboundContext.localAddress.address)
pull(in)
}
@@ -294,12 +295,13 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl
push(out, env)
else {
if (log.isDebugEnabled)
- log.debug(s"Dropping message [{}] from unknown system with UID [{}]. " +
- "This system with UID [{}] was probably restarted. " +
- "Messages will be accepted when new handshake has been completed.",
- env.message.getClass.getName,
- env.originUid,
- inboundContext.localAddress.uid)
+ log.debug(
+ s"Dropping message [{}] from unknown system with UID [{}]. " +
+ "This system with UID [{}] was probably restarted. " +
+ "Messages will be accepted when new handshake has been completed.",
+ env.message.getClass.getName,
+ env.originUid,
+ inboundContext.localAddress.uid)
pull(in)
}
}
diff --git a/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala b/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala
index 5ed9d67c16..ef8ef02f70 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala
@@ -17,11 +17,12 @@ private[remote] object InboundEnvelope {
/**
* Only used in tests
*/
- def apply(recipient: OptionVal[InternalActorRef],
- message: AnyRef,
- sender: OptionVal[ActorRef],
- originUid: Long,
- association: OptionVal[OutboundContext]): InboundEnvelope = {
+ def apply(
+ recipient: OptionVal[InternalActorRef],
+ message: AnyRef,
+ sender: OptionVal[ActorRef],
+ originUid: Long,
+ association: OptionVal[OutboundContext]): InboundEnvelope = {
val env = new ReusableInboundEnvelope
env.init(recipient, sender, originUid, -1, "", 0, null, association, lane = 0).withMessage(message)
}
@@ -60,10 +61,10 @@ private[remote] trait InboundEnvelope extends NoSerializationVerificationNeeded
*/
private[remote] object ReusableInboundEnvelope {
def createObjectPool(capacity: Int) =
- new ObjectPool[ReusableInboundEnvelope](capacity,
- create = () => new ReusableInboundEnvelope,
- clear = inEnvelope =>
- inEnvelope.asInstanceOf[ReusableInboundEnvelope].clear())
+ new ObjectPool[ReusableInboundEnvelope](
+ capacity,
+ create = () => new ReusableInboundEnvelope,
+ clear = inEnvelope => inEnvelope.asInstanceOf[ReusableInboundEnvelope].clear())
}
/**
@@ -119,15 +120,16 @@ private[remote] final class ReusableInboundEnvelope extends InboundEnvelope {
_lane = 0
}
- def init(recipient: OptionVal[InternalActorRef],
- sender: OptionVal[ActorRef],
- originUid: Long,
- serializer: Int,
- classManifest: String,
- flags: Byte,
- envelopeBuffer: EnvelopeBuffer,
- association: OptionVal[OutboundContext],
- lane: Int): InboundEnvelope = {
+ def init(
+ recipient: OptionVal[InternalActorRef],
+ sender: OptionVal[ActorRef],
+ originUid: Long,
+ serializer: Int,
+ classManifest: String,
+ flags: Byte,
+ envelopeBuffer: EnvelopeBuffer,
+ association: OptionVal[OutboundContext],
+ lane: Int): InboundEnvelope = {
_recipient = recipient
_sender = sender
_originUid = originUid
diff --git a/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala b/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala
index cc52ebaf5e..015bfc5669 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala
@@ -39,10 +39,11 @@ private[remote] class InboundQuarantineCheck(inboundContext: InboundContext)
case OptionVal.Some(association) =>
if (association.associationState.isQuarantined(env.originUid)) {
if (log.isDebugEnabled)
- log.debug("Dropping message [{}] from [{}#{}] because the system is quarantined",
- Logging.messageClassName(env.message),
- association.remoteAddress,
- env.originUid)
+ log.debug(
+ "Dropping message [{}] from [{}#{}] because the system is quarantined",
+ Logging.messageClassName(env.message),
+ association.remoteAddress,
+ env.originUid)
// avoid starting outbound stream for heartbeats
if (!env.message.isInstanceOf[Quarantined] && !isHeartbeat(env.message))
inboundContext.sendControl(
diff --git a/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala b/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala
index 2d984a7574..f4d6d0715b 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala
@@ -23,8 +23,9 @@ private[akka] case class CacheStatistics(entries: Int, maxProbeDistance: Int, av
* to kick out entires that are considered old. The implementation tries to keep the map close to full, only evicting
* old entries when needed.
*/
-private[akka] abstract class LruBoundedCache[K: ClassTag, V <: AnyRef: ClassTag](capacity: Int,
- evictAgeThreshold: Int) {
+private[akka] abstract class LruBoundedCache[K: ClassTag, V <: AnyRef: ClassTag](
+ capacity: Int,
+ evictAgeThreshold: Int) {
require(capacity > 0, "Capacity must be larger than zero")
require((capacity & (capacity - 1)) == 0, "Capacity must be power of two")
require(evictAgeThreshold <= capacity, "Age threshold must be less than capacity.")
diff --git a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala
index f343a5bcd1..63ee287948 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala
@@ -48,9 +48,10 @@ private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: R
log.debug(LogMarker.Security, "dropping daemon message [{}] in untrusted mode", messageClassName(message))
} else {
if (LogReceive && debugLogEnabled)
- log.debug("received daemon message [{}] from [{}]",
- message,
- senderOption.getOrElse(originAddress.getOrElse("")))
+ log.debug(
+ "received daemon message [{}] from [{}]",
+ message,
+ senderOption.getOrElse(originAddress.getOrElse("")))
remoteDaemon ! message
}
@@ -62,10 +63,11 @@ private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: R
if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) ||
sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) {
if (debugLogEnabled)
- log.debug(LogMarker.Security,
- "operating in UntrustedMode, dropping inbound actor selection to [{}], " +
- "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration",
- sel.elements.mkString("/", "/", ""))
+ log.debug(
+ LogMarker.Security,
+ "operating in UntrustedMode, dropping inbound actor selection to [{}], " +
+ "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration",
+ sel.elements.mkString("/", "/", ""))
} else
// run the receive logic for ActorSelectionMessage here to make sure it is not stuck on busy user actor
ActorSelection.deliverSelection(l, sender, sel)
@@ -83,18 +85,20 @@ private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: R
case r @ (_: RemoteRef | _: RepointableRef) if !r.isLocal && !UntrustedMode =>
if (LogReceive && debugLogEnabled)
- log.debug("received remote-destined message [{}] to [{}] from [{}]",
- message,
- recipient,
- senderOption.getOrElse(originAddress.getOrElse("")))
+ log.debug(
+ "received remote-destined message [{}] to [{}] from [{}]",
+ message,
+ recipient,
+ senderOption.getOrElse(originAddress.getOrElse("")))
// if it was originally addressed to us but is in fact remote from our point of view (i.e. remote-deployed)
r.!(message)(sender)
case r =>
- log.error("dropping message [{}] for unknown recipient [{}] from [{}]",
- messageClassName(message),
- r,
- senderOption.getOrElse(originAddress.getOrElse("")))
+ log.error(
+ "dropping message [{}] for unknown recipient [{}] from [{}]",
+ messageClassName(message),
+ r,
+ senderOption.getOrElse(originAddress.getOrElse("")))
}
}
diff --git a/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala b/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala
index aef3d51919..db5102b6e3 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala
@@ -38,9 +38,10 @@ private[remote] trait OutboundEnvelope extends NoSerializationVerificationNeeded
*/
private[remote] object ReusableOutboundEnvelope {
def createObjectPool(capacity: Int) =
- new ObjectPool[ReusableOutboundEnvelope](capacity,
- create = () => new ReusableOutboundEnvelope,
- clear = outEnvelope => outEnvelope.clear())
+ new ObjectPool[ReusableOutboundEnvelope](
+ capacity,
+ create = () => new ReusableOutboundEnvelope,
+ clear = outEnvelope => outEnvelope.clear())
}
/**
diff --git a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala
index 254944a628..59a39b05b6 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala
@@ -94,9 +94,10 @@ abstract class RemoteInstrument {
* }}}
*
*/
-private[remote] final class RemoteInstruments(private val system: ExtendedActorSystem,
- private val log: LoggingAdapter,
- _instruments: Vector[RemoteInstrument]) {
+private[remote] final class RemoteInstruments(
+ private val system: ExtendedActorSystem,
+ private val log: LoggingAdapter,
+ _instruments: Vector[RemoteInstrument]) {
import RemoteInstruments._
def this(system: ExtendedActorSystem, log: LoggingAdapter) = this(system, log, RemoteInstruments.create(system, log))
@@ -122,9 +123,10 @@ private[remote] final class RemoteInstruments(private val system: ExtendedActorS
serializeInstrument(instrument, oe, buffer)
} catch {
case NonFatal(t) =>
- log.debug("Skipping serialization of RemoteInstrument {} since it failed with {}",
- instrument.identifier,
- t.getMessage)
+ log.debug(
+ "Skipping serialization of RemoteInstrument {} since it failed with {}",
+ instrument.identifier,
+ t.getMessage)
buffer.position(rewindPos)
}
i += 1
@@ -145,16 +147,18 @@ private[remote] final class RemoteInstruments(private val system: ExtendedActorS
}
}
- private def serializeInstrument(instrument: RemoteInstrument,
- outboundEnvelope: OutboundEnvelope,
- buffer: ByteBuffer): Unit = {
+ private def serializeInstrument(
+ instrument: RemoteInstrument,
+ outboundEnvelope: OutboundEnvelope,
+ buffer: ByteBuffer): Unit = {
val startPos = buffer.position()
buffer.putInt(0)
val dataPos = buffer.position()
- instrument.remoteWriteMetadata(outboundEnvelope.recipient.orNull,
- outboundEnvelope.message,
- outboundEnvelope.sender.orNull,
- buffer)
+ instrument.remoteWriteMetadata(
+ outboundEnvelope.recipient.orNull,
+ outboundEnvelope.message,
+ outboundEnvelope.sender.orNull,
+ buffer)
val endPos = buffer.position()
if (endPos == dataPos) {
// if the instrument didn't write anything, then rewind to the start
@@ -193,9 +197,10 @@ private[remote] final class RemoteInstruments(private val system: ExtendedActorS
deserializeInstrument(instrument, inboundEnvelope, buffer)
} catch {
case NonFatal(t) =>
- log.debug("Skipping deserialization of RemoteInstrument {} since it failed with {}",
- instrument.identifier,
- t.getMessage)
+ log.debug(
+ "Skipping deserialization of RemoteInstrument {} since it failed with {}",
+ instrument.identifier,
+ t.getMessage)
}
i += 1
} else if (key > identifier) {
@@ -211,8 +216,9 @@ private[remote] final class RemoteInstruments(private val system: ExtendedActorS
}
} else {
if (log.isDebugEnabled)
- log.debug("Skipping serialized data in message for RemoteInstrument(s) {} that has no local match",
- remoteInstrumentIdIteratorRaw(buffer, endPos).mkString("[", ", ", "]"))
+ log.debug(
+ "Skipping serialized data in message for RemoteInstrument(s) {} that has no local match",
+ remoteInstrumentIdIteratorRaw(buffer, endPos).mkString("[", ", ", "]"))
}
} catch {
case NonFatal(t) =>
@@ -222,13 +228,15 @@ private[remote] final class RemoteInstruments(private val system: ExtendedActorS
}
}
- private def deserializeInstrument(instrument: RemoteInstrument,
- inboundEnvelope: InboundEnvelope,
- buffer: ByteBuffer): Unit = {
- instrument.remoteReadMetadata(inboundEnvelope.recipient.orNull,
- inboundEnvelope.message,
- inboundEnvelope.sender.orNull,
- buffer)
+ private def deserializeInstrument(
+ instrument: RemoteInstrument,
+ inboundEnvelope: InboundEnvelope,
+ buffer: ByteBuffer): Unit = {
+ instrument.remoteReadMetadata(
+ inboundEnvelope.recipient.orNull,
+ inboundEnvelope.message,
+ inboundEnvelope.sender.orNull,
+ buffer)
}
def messageSent(outboundEnvelope: OutboundEnvelope, size: Int, time: Long): Unit = {
@@ -247,15 +255,17 @@ private[remote] final class RemoteInstruments(private val system: ExtendedActorS
messageSent(0)
}
- private def messageSentInstrument(instrument: RemoteInstrument,
- outboundEnvelope: OutboundEnvelope,
- size: Int,
- time: Long): Unit = {
- instrument.remoteMessageSent(outboundEnvelope.recipient.orNull,
- outboundEnvelope.message,
- outboundEnvelope.sender.orNull,
- size,
- time)
+ private def messageSentInstrument(
+ instrument: RemoteInstrument,
+ outboundEnvelope: OutboundEnvelope,
+ size: Int,
+ time: Long): Unit = {
+ instrument.remoteMessageSent(
+ outboundEnvelope.recipient.orNull,
+ outboundEnvelope.message,
+ outboundEnvelope.sender.orNull,
+ size,
+ time)
}
def messageReceived(inboundEnvelope: InboundEnvelope, size: Int, time: Long): Unit = {
@@ -274,15 +284,17 @@ private[remote] final class RemoteInstruments(private val system: ExtendedActorS
messageRecieved(0)
}
- private def messageReceivedInstrument(instrument: RemoteInstrument,
- inboundEnvelope: InboundEnvelope,
- size: Int,
- time: Long): Unit = {
- instrument.remoteMessageReceived(inboundEnvelope.recipient.orNull,
- inboundEnvelope.message,
- inboundEnvelope.sender.orNull,
- size,
- time)
+ private def messageReceivedInstrument(
+ instrument: RemoteInstrument,
+ inboundEnvelope: InboundEnvelope,
+ size: Int,
+ time: Long): Unit = {
+ instrument.remoteMessageReceived(
+ inboundEnvelope.recipient.orNull,
+ inboundEnvelope.message,
+ inboundEnvelope.sender.orNull,
+ size,
+ time)
}
private def remoteInstrumentIdIteratorRaw(buffer: ByteBuffer, endPos: Int): Iterator[Int] = {
diff --git a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala
index 2302c286a7..e8e6be2828 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala
@@ -70,10 +70,11 @@ import akka.util.OptionVal
/**
* INTERNAL API
*/
-@InternalApi private[remote] class SystemMessageDelivery(outboundContext: OutboundContext,
- deadLetters: ActorRef,
- resendInterval: FiniteDuration,
- maxBufferSize: Int)
+@InternalApi private[remote] class SystemMessageDelivery(
+ outboundContext: OutboundContext,
+ deadLetters: ActorRef,
+ resendInterval: FiniteDuration,
+ maxBufferSize: Int)
extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] {
import SystemMessageDelivery._
@@ -165,9 +166,10 @@ import akka.util.OptionVal
private val nackCallback = getAsyncCallback[Nack] { reply =>
if (reply.seqNo <= seqNo) {
ack(reply.seqNo)
- log.warning("Received negative acknowledgement of system message from [{}], highest acknowledged [{}]",
- outboundContext.remoteAddress,
- reply.seqNo)
+ log.warning(
+ "Received negative acknowledgement of system message from [{}], highest acknowledged [{}]",
+ outboundContext.remoteAddress,
+ reply.seqNo)
// Nack should be very rare (connection issue) so no urgency of resending, it will be resent
// by the scheduled tick.
}
@@ -363,10 +365,11 @@ import akka.util.OptionVal
push(out, unwrapped)
} else if (n < expectedSeqNo) {
if (log.isDebugEnabled)
- log.debug("Deduplicate system message [{}] from [{}], expected [{}]",
- n,
- fromRemoteAddressStr,
- expectedSeqNo)
+ log.debug(
+ "Deduplicate system message [{}] from [{}], expected [{}]",
+ n,
+ fromRemoteAddressStr,
+ expectedSeqNo)
inboundContext.sendControl(ackReplyTo.address, Ack(expectedSeqNo - 1, localAddress))
pull(in)
} else {
diff --git a/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala b/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala
index 0574e3c784..c484b7410a 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala
@@ -120,9 +120,10 @@ private[remote] class OutboundTestStage(outboundContext: OutboundContext, state:
override def onPush(): Unit = {
val env = grab(in)
if (state.isBlackhole(outboundContext.localAddress.address, outboundContext.remoteAddress)) {
- log.debug("dropping outbound message [{}] to [{}] because of blackhole",
- Logging.messageClassName(env.message),
- outboundContext.remoteAddress)
+ log.debug(
+ "dropping outbound message [{}] to [{}] because of blackhole",
+ Logging.messageClassName(env.message),
+ outboundContext.remoteAddress)
pull(in) // drop message
} else
push(out, env)
@@ -162,10 +163,11 @@ private[remote] class InboundTestStage(inboundContext: InboundContext, state: Sh
push(out, env)
case OptionVal.Some(association) =>
if (state.isBlackhole(inboundContext.localAddress.address, association.remoteAddress)) {
- log.debug("dropping inbound message [{}] from [{}] with UID [{}] because of blackhole",
- Logging.messageClassName(env.message),
- association.remoteAddress,
- env.originUid)
+ log.debug(
+ "dropping inbound message [{}] from [{}] with UID [{}] because of blackhole",
+ Logging.messageClassName(env.message),
+ association.remoteAddress,
+ env.originUid)
pull(in) // drop message
} else
push(out, env)
diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala
index 4b1261b1a9..7a42965d0b 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala
@@ -39,13 +39,14 @@ private[remote] object AeronSink {
private val TimerCheckPeriod = 1 << 13 // 8192
private val TimerCheckMask = TimerCheckPeriod - 1
- private final class OfferTask(pub: Publication,
- var buffer: UnsafeBuffer,
- var msgSize: Int,
- onOfferSuccess: AsyncCallback[Unit],
- giveUpAfter: Duration,
- onGiveUp: AsyncCallback[Unit],
- onPublicationClosed: AsyncCallback[Unit])
+ private final class OfferTask(
+ pub: Publication,
+ var buffer: UnsafeBuffer,
+ var msgSize: Int,
+ onOfferSuccess: AsyncCallback[Unit],
+ giveUpAfter: Duration,
+ onGiveUp: AsyncCallback[Unit],
+ onPublicationClosed: AsyncCallback[Unit])
extends (() => Boolean) {
val giveUpAfterNanos = giveUpAfter match {
case f: FiniteDuration => f.toNanos
@@ -85,13 +86,14 @@ private[remote] object AeronSink {
* INTERNAL API
* @param channel eg. "aeron:udp?endpoint=localhost:40123"
*/
-private[remote] class AeronSink(channel: String,
- streamId: Int,
- aeron: Aeron,
- taskRunner: TaskRunner,
- pool: EnvelopeBufferPool,
- giveUpAfter: Duration,
- flightRecorder: EventSink)
+private[remote] class AeronSink(
+ channel: String,
+ streamId: Int,
+ aeron: Aeron,
+ taskRunner: TaskRunner,
+ pool: EnvelopeBufferPool,
+ giveUpAfter: Duration,
+ flightRecorder: EventSink)
extends GraphStageWithMaterializedValue[SinkShape[EnvelopeBuffer], Future[Done]] {
import AeronSink._
import TaskRunner._
@@ -113,13 +115,14 @@ private[remote] class AeronSink(channel: String,
private val spinning = 2 * taskRunner.idleCpuLevel
private var backoffCount = spinning
private var lastMsgSize = 0
- private val offerTask = new OfferTask(pub,
- null,
- lastMsgSize,
- getAsyncCallback(_ => taskOnOfferSuccess()),
- giveUpAfter,
- getAsyncCallback(_ => onGiveUp()),
- getAsyncCallback(_ => onPublicationClosed()))
+ private val offerTask = new OfferTask(
+ pub,
+ null,
+ lastMsgSize,
+ getAsyncCallback(_ => taskOnOfferSuccess()),
+ giveUpAfter,
+ getAsyncCallback(_ => onGiveUp()),
+ getAsyncCallback(_ => onPublicationClosed()))
private val addOfferTask: Add = Add(offerTask)
private var offerTaskInProgress = false
diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala
index 4764969c21..5fd111c22d 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala
@@ -30,9 +30,10 @@ import scala.concurrent.{ Future, Promise }
*/
private[remote] object AeronSource {
- private def pollTask(sub: Subscription,
- handler: MessageHandler,
- onMessage: AsyncCallback[EnvelopeBuffer]): () => Boolean = { () =>
+ private def pollTask(
+ sub: Subscription,
+ handler: MessageHandler,
+ onMessage: AsyncCallback[EnvelopeBuffer]): () => Boolean = { () =>
{
handler.reset
sub.poll(handler.fragmentsHandler, 1)
@@ -78,13 +79,14 @@ private[remote] object AeronSource {
* @param spinning the amount of busy spinning to be done synchronously before deferring to the TaskRunner
* when waiting for data
*/
-private[remote] class AeronSource(channel: String,
- streamId: Int,
- aeron: Aeron,
- taskRunner: TaskRunner,
- pool: EnvelopeBufferPool,
- flightRecorder: EventSink,
- spinning: Int)
+private[remote] class AeronSource(
+ channel: String,
+ streamId: Int,
+ aeron: Aeron,
+ taskRunner: TaskRunner,
+ pool: EnvelopeBufferPool,
+ flightRecorder: EventSink,
+ spinning: Int)
extends GraphStageWithMaterializedValue[SourceShape[EnvelopeBuffer], AeronSource.AeronLifecycle] {
import AeronSource._
diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala
index fb1dc28fc3..6ab6b8d839 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala
@@ -152,9 +152,10 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro
}
} catch {
case NonFatal(e) =>
- log.warning("Couldn't delete Aeron embedded media driver files in [{}] due to [{}]",
- driver.aeronDirectoryName,
- e)
+ log.warning(
+ "Couldn't delete Aeron embedded media driver files in [{}] due to [{}]",
+ driver.aeronDirectoryName,
+ e)
}
}
}
@@ -200,13 +201,14 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro
private def handleFatalError(cause: Throwable): Unit = {
if (fatalErrorOccured.compareAndSet(false, true)) {
if (!isShutdown) {
- log.error(cause,
- "Fatal Aeron error {}. Have to terminate ActorSystem because it lost contact with the " +
- "{} Aeron media driver. Possible configuration properties to mitigate the problem are " +
- "'client-liveness-timeout' or 'driver-timeout'. {}",
- Logging.simpleName(cause),
- if (settings.Advanced.EmbeddedMediaDriver) "embedded" else "external",
- cause)
+ log.error(
+ cause,
+ "Fatal Aeron error {}. Have to terminate ActorSystem because it lost contact with the " +
+ "{} Aeron media driver. Possible configuration properties to mitigate the problem are " +
+ "'client-liveness-timeout' or 'driver-timeout'. {}",
+ Logging.simpleName(cause),
+ if (settings.Advanced.EmbeddedMediaDriver) "embedded" else "external",
+ cause)
taskRunner.stop()
aeronErrorLogTask.cancel()
if (settings.LogAeronCounters) aeronCounterTask.cancel()
@@ -275,9 +277,10 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro
}
}
- override protected def outboundTransportSink(outboundContext: OutboundContext,
- streamId: Int,
- bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = {
+ override protected def outboundTransportSink(
+ outboundContext: OutboundContext,
+ streamId: Int,
+ bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = {
val giveUpAfter =
if (streamId == ControlStreamId) settings.Advanced.GiveUpSystemMessageAfter
else settings.Advanced.GiveUpMessageAfter
@@ -286,24 +289,26 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro
// If we want to stop for Aeron also it is probably easier to stop the publication inside the
// AeronSink, i.e. not using a KillSwitch.
Sink.fromGraph(
- new AeronSink(outboundChannel(outboundContext.remoteAddress),
- streamId,
- aeron,
- taskRunner,
- bufferPool,
- giveUpAfter,
- createFlightRecorderEventSink()))
+ new AeronSink(
+ outboundChannel(outboundContext.remoteAddress),
+ streamId,
+ aeron,
+ taskRunner,
+ bufferPool,
+ giveUpAfter,
+ createFlightRecorderEventSink()))
}
private def aeronSource(streamId: Int, pool: EnvelopeBufferPool): Source[EnvelopeBuffer, AeronSource.AeronLifecycle] =
Source.fromGraph(
- new AeronSource(inboundChannel,
- streamId,
- aeron,
- taskRunner,
- pool,
- createFlightRecorderEventSink(),
- aeronSourceSpinningStrategy))
+ new AeronSource(
+ inboundChannel,
+ streamId,
+ aeron,
+ taskRunner,
+ pool,
+ createFlightRecorderEventSink(),
+ aeronSourceSpinningStrategy))
private def aeronSourceSpinningStrategy: Int =
if (settings.Advanced.InboundLanes > 1 || // spinning was identified to be the cause of massive slowdowns with multiple lanes, see #21365
@@ -357,9 +362,10 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro
laneSource
.toMat(
Sink.fromGraph(
- new FixedSizePartitionHub[InboundEnvelope](inboundLanePartitioner,
- inboundLanes,
- settings.Advanced.InboundHubBufferSize)))({
+ new FixedSizePartitionHub[InboundEnvelope](
+ inboundLanePartitioner,
+ inboundLanes,
+ settings.Advanced.InboundHubBufferSize)))({
case ((a, b), c) => (a, b, c)
})
.run()(materializer)
@@ -402,9 +408,10 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro
attachInboundStreamRestart("Inbound large message stream", completed, () => runInboundLargeMessagesStream())
}
- private def updateStreamMatValues(streamId: Int,
- aeronSourceLifecycle: AeronSource.AeronLifecycle,
- completed: Future[Done]): Unit = {
+ private def updateStreamMatValues(
+ streamId: Int,
+ aeronSourceLifecycle: AeronSource.AeronLifecycle,
+ completed: Future[Done]): Unit = {
implicit val ec = materializer.executionContext
updateStreamMatValues(streamId, InboundStreamMatValues[AeronLifecycle](aeronSourceLifecycle, completed.recover {
case _ => Done
diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala
index 6385614a4d..8da4f430ab 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala
@@ -27,8 +27,9 @@ private[remote] object CompressionProtocol {
* INTERNAL API
* Sent by the "receiving" node after allocating a compression id to a given [[akka.actor.ActorRef]]
*/
- private[remote] final case class ActorRefCompressionAdvertisement(from: UniqueAddress,
- table: CompressionTable[ActorRef])
+ private[remote] final case class ActorRefCompressionAdvertisement(
+ from: UniqueAddress,
+ table: CompressionTable[ActorRef])
extends CompressionAdvertisement[ActorRef]
/**
@@ -46,8 +47,9 @@ private[remote] object CompressionProtocol {
* INTERNAL API
* Sent by the "receiving" node after allocating a compression id to a given class manifest
*/
- private[remote] final case class ClassManifestCompressionAdvertisement(from: UniqueAddress,
- table: CompressionTable[String])
+ private[remote] final case class ClassManifestCompressionAdvertisement(
+ from: UniqueAddress,
+ table: CompressionTable[String])
extends CompressionAdvertisement[String]
/**
diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala
index fd8870f3c8..405d7fc81a 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala
@@ -27,11 +27,13 @@ private[remote] final case class CompressionTable[T](originUid: Long, version: B
// TODO: these are some expensive sanity checks, about the numbers being consecutive, without gaps
// TODO: we can remove them, make them re-map (not needed I believe though)
val expectedGaplessSum = Integer.valueOf((dictionary.size * (dictionary.size + 1)) / 2) /* Dirichlet */
- require(dictionary.values.min == 0,
- "Compression table should start allocating from 0, yet lowest allocated id was " + dictionary.values.min)
- require(dictionary.values.sum + dictionary.size == expectedGaplessSum,
- "Given compression map does not seem to be gap-less and starting from zero, " +
- "which makes compressing it into an Array difficult, bailing out! Map was: " + dictionary)
+ require(
+ dictionary.values.min == 0,
+ "Compression table should start allocating from 0, yet lowest allocated id was " + dictionary.values.min)
+ require(
+ dictionary.values.sum + dictionary.size == expectedGaplessSum,
+ "Given compression map does not seem to be gap-less and starting from zero, " +
+ "which makes compressing it into an Array difficult, bailing out! Map was: " + dictionary)
val tups = new Array[(Object, Int)](dictionary.size).asInstanceOf[Array[(T, Int)]]
val ts = new Array[Object](dictionary.size).asInstanceOf[Array[T]]
diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala
index 98bfab66ab..781917187b 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala
@@ -53,10 +53,11 @@ private[remote] trait InboundCompressions {
* One per incoming Aeron stream, actual compression tables are kept per-originUid and created on demand.
* All access is via the Decoder stage.
*/
-private[remote] final class InboundCompressionsImpl(system: ActorSystem,
- inboundContext: InboundContext,
- settings: ArterySettings.Compression,
- eventSink: EventSink = IgnoreEventSink)
+private[remote] final class InboundCompressionsImpl(
+ system: ActorSystem,
+ inboundContext: InboundContext,
+ settings: ArterySettings.Compression,
+ eventSink: EventSink = IgnoreEventSink)
extends InboundCompressions {
private[this] val _actorRefsIns = new Long2ObjectHashMap[InboundActorRefCompression]()
@@ -170,22 +171,24 @@ private[remote] final class InboundCompressionsImpl(system: ActorSystem,
* It can be used to advertise a compression table.
* If the association is not complete - we simply dont advertise the table, which is fine (handshake not yet complete).
*/
-private[remote] final class InboundActorRefCompression(log: LoggingAdapter,
- settings: ArterySettings.Compression,
- originUid: Long,
- inboundContext: InboundContext,
- heavyHitters: TopHeavyHitters[ActorRef])
+private[remote] final class InboundActorRefCompression(
+ log: LoggingAdapter,
+ settings: ArterySettings.Compression,
+ originUid: Long,
+ inboundContext: InboundContext,
+ heavyHitters: TopHeavyHitters[ActorRef])
extends InboundCompression[ActorRef](log, settings, originUid, inboundContext, heavyHitters) {
override def decompress(tableVersion: Byte, idx: Int): OptionVal[ActorRef] =
super.decompressInternal(tableVersion, idx, 0)
override def advertiseCompressionTable(outboundContext: OutboundContext, table: CompressionTable[ActorRef]): Unit = {
- log.debug(s"Advertise {} compression [{}] to [{}#{}]",
- Logging.simpleName(getClass),
- table,
- outboundContext.remoteAddress,
- originUid)
+ log.debug(
+ s"Advertise {} compression [{}] to [{}#{}]",
+ Logging.simpleName(getClass),
+ table,
+ outboundContext.remoteAddress,
+ originUid)
outboundContext.sendControl(
CompressionProtocol.ActorRefCompressionAdvertisement(inboundContext.localAddress, table))
}
@@ -194,19 +197,21 @@ private[remote] final class InboundActorRefCompression(log: LoggingAdapter,
/**
* INTERNAL API
*/
-private[remote] final class InboundManifestCompression(log: LoggingAdapter,
- settings: ArterySettings.Compression,
- originUid: Long,
- inboundContext: InboundContext,
- heavyHitters: TopHeavyHitters[String])
+private[remote] final class InboundManifestCompression(
+ log: LoggingAdapter,
+ settings: ArterySettings.Compression,
+ originUid: Long,
+ inboundContext: InboundContext,
+ heavyHitters: TopHeavyHitters[String])
extends InboundCompression[String](log, settings, originUid, inboundContext, heavyHitters) {
override def advertiseCompressionTable(outboundContext: OutboundContext, table: CompressionTable[String]): Unit = {
- log.debug(s"Advertise {} compression [{}] to [{}#{}]",
- Logging.simpleName(getClass),
- table,
- outboundContext.remoteAddress,
- originUid)
+ log.debug(
+ s"Advertise {} compression [{}] to [{}#{}]",
+ Logging.simpleName(getClass),
+ table,
+ outboundContext.remoteAddress,
+ originUid)
outboundContext.sendControl(
CompressionProtocol.ClassManifestCompressionAdvertisement(inboundContext.localAddress, table))
}
@@ -227,11 +232,12 @@ private[remote] object InboundCompression {
object Tables {
def empty[T] =
- Tables(oldTables = List(DecompressionTable.disabled[T]),
- activeTable = DecompressionTable.empty[T],
- nextTable = DecompressionTable.empty[T].copy(version = 1),
- advertisementInProgress = None,
- keepOldTables = KeepOldTablesNumber)
+ Tables(
+ oldTables = List(DecompressionTable.disabled[T]),
+ activeTable = DecompressionTable.empty[T],
+ nextTable = DecompressionTable.empty[T].copy(version = 1),
+ advertisementInProgress = None,
+ keepOldTables = KeepOldTablesNumber)
}
/**
@@ -241,11 +247,12 @@ private[remote] object InboundCompression {
* It starts with containing only a single "disabled" table (versioned as `DecompressionTable.DisabledVersion`),
* and from there on continuously accumulates at most [[keepOldTables]] recently used tables.
*/
- final case class Tables[T](oldTables: List[DecompressionTable[T]],
- activeTable: DecompressionTable[T],
- nextTable: DecompressionTable[T],
- advertisementInProgress: Option[CompressionTable[T]],
- keepOldTables: Int) {
+ final case class Tables[T](
+ oldTables: List[DecompressionTable[T]],
+ activeTable: DecompressionTable[T],
+ nextTable: DecompressionTable[T],
+ advertisementInProgress: Option[CompressionTable[T]],
+ keepOldTables: Int) {
def selectTable(version: Int): OptionVal[DecompressionTable[T]] = {
if (activeTable.version == version) {
@@ -282,11 +289,12 @@ private[remote] object InboundCompression {
if (version == 127) 0
else (version + 1).toByte
- Tables(oldTables = (activeTable :: oldTables).take(keepOldTables),
- activeTable = nextTable,
- nextTable = DecompressionTable.empty[T].copy(version = incrementTableVersion(nextTable.version)),
- advertisementInProgress = None,
- keepOldTables = keepOldTables)
+ Tables(
+ oldTables = (activeTable :: oldTables).take(keepOldTables),
+ activeTable = nextTable,
+ nextTable = DecompressionTable.empty[T].copy(version = incrementTableVersion(nextTable.version)),
+ advertisementInProgress = None,
+ keepOldTables = keepOldTables)
}
}
@@ -298,11 +306,12 @@ private[remote] object InboundCompression {
*
* Access to this class must be externally synchronised (e.g. by accessing it from only Actors or a GraphStage etc).
*/
-private[remote] abstract class InboundCompression[T >: Null](val log: LoggingAdapter,
- val settings: ArterySettings.Compression,
- val originUid: Long,
- inboundContext: InboundContext,
- val heavyHitters: TopHeavyHitters[T]) {
+private[remote] abstract class InboundCompression[T >: Null](
+ val log: LoggingAdapter,
+ val settings: ArterySettings.Compression,
+ val originUid: Long,
+ inboundContext: InboundContext,
+ val heavyHitters: TopHeavyHitters[T]) {
private[this] var tables: InboundCompression.Tables[T] = InboundCompression.Tables.empty
@@ -350,23 +359,25 @@ private[remote] abstract class InboundCompression[T >: Null](val log: LoggingAda
else throw new UnknownCompressedIdException(idx)
case _ if incomingVersionIsAdvertisementInProgress(incomingTableVersion) =>
- log.debug("Received first value from originUid [{}] compressed using the advertised compression table, " +
- "flipping to it (version: {})",
- originUid,
- current.nextTable.version)
+ log.debug(
+ "Received first value from originUid [{}] compressed using the advertised compression table, " +
+ "flipping to it (version: {})",
+ originUid,
+ current.nextTable.version)
confirmAdvertisement(incomingTableVersion, gaveUp = false)
decompressInternal(incomingTableVersion, idx, attemptCounter + 1) // recurse
case _ =>
// which means that incoming version was > nextTable.version, which likely that
// it is using a table that was built for previous incarnation of this system
- log.warning("Inbound message from originUid [{}] is using unknown compression table version. " +
- "It may have been sent with compression table built for previous incarnation of this system. " +
- "Versions activeTable: {}, nextTable: {}, incomingTable: {}",
- originUid,
- activeVersion,
- current.nextTable.version,
- incomingTableVersion)
+ log.warning(
+ "Inbound message from originUid [{}] is using unknown compression table version. " +
+ "It may have been sent with compression table built for previous incarnation of this system. " +
+ "Versions activeTable: {}, nextTable: {}, incomingTable: {}",
+ originUid,
+ activeVersion,
+ current.nextTable.version,
+ incomingTableVersion)
OptionVal.None
}
}
@@ -376,16 +387,18 @@ private[remote] abstract class InboundCompression[T >: Null](val log: LoggingAda
tables.advertisementInProgress match {
case Some(inProgress) if tableVersion == inProgress.version =>
tables = tables.startUsingNextTable()
- log.debug("{} compression table version [{}] for originUid [{}]",
- if (gaveUp) "Gave up" else "Confirmed",
- tableVersion,
- originUid)
+ log.debug(
+ "{} compression table version [{}] for originUid [{}]",
+ if (gaveUp) "Gave up" else "Confirmed",
+ tableVersion,
+ originUid)
case Some(inProgress) if tableVersion != inProgress.version =>
- log.debug("{} compression table version [{}] for originUid [{}] but other version in progress [{}]",
- if (gaveUp) "Gave up" else "Confirmed",
- tableVersion,
- originUid,
- inProgress.version)
+ log.debug(
+ "{} compression table version [{}] for originUid [{}] but other version in progress [{}]",
+ if (gaveUp) "Gave up" else "Confirmed",
+ tableVersion,
+ originUid,
+ inProgress.version)
case None =>
// already confirmed
}
@@ -434,9 +447,10 @@ private[remote] abstract class InboundCompression[T >: Null](val log: LoggingAda
resendCount = 0
advertiseCompressionTable(association, table)
} else if (association.isOrdinaryMessageStreamActive()) {
- log.debug("{} for originUid [{}] not changed, no need to advertise same.",
- Logging.simpleName(tables.activeTable),
- originUid)
+ log.debug(
+ "{} for originUid [{}] not changed, no need to advertise same.",
+ Logging.simpleName(tables.activeTable),
+ originUid)
}
case OptionVal.None =>
@@ -452,19 +466,21 @@ private[remote] abstract class InboundCompression[T >: Null](val log: LoggingAda
inboundContext.association(originUid) match {
case OptionVal.Some(association) =>
- log.debug("Advertisement in progress for originUid [{}] version [{}], resending [{}:{}]",
- originUid,
- inProgress.version,
- resendCount,
- maxResendCount)
+ log.debug(
+ "Advertisement in progress for originUid [{}] version [{}], resending [{}:{}]",
+ originUid,
+ inProgress.version,
+ resendCount,
+ maxResendCount)
advertiseCompressionTable(association, inProgress) // resend
case OptionVal.None =>
}
} else {
// give up, it might be dead
- log.debug("Advertisement in progress for originUid [{}] version [{}] but no confirmation after retries.",
- originUid,
- inProgress.version)
+ log.debug(
+ "Advertisement in progress for originUid [{}] version [{}] but no confirmation after retries.",
+ originUid,
+ inProgress.version)
confirmAdvertisement(inProgress.version, gaveUp = true)
}
}
diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala
index 358940a05e..78c1f3438e 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala
@@ -62,9 +62,10 @@ private[remote] object ArteryTcpTransport {
/**
* INTERNAL API
*/
-private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
- _provider: RemoteActorRefProvider,
- tlsEnabled: Boolean)
+private[remote] class ArteryTcpTransport(
+ _system: ExtendedActorSystem,
+ _provider: RemoteActorRefProvider,
+ tlsEnabled: Boolean)
extends ArteryTransport(_system, _provider) {
import ArteryTransport._
import ArteryTcpTransport._
@@ -87,8 +88,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
// load from config
OptionVal.Some(
system.dynamicAccess
- .createInstanceFor[SSLEngineProvider](settings.SSLEngineProviderClassName,
- List((classOf[ActorSystem], system)))
+ .createInstanceFor[SSLEngineProvider](
+ settings.SSLEngineProviderClassName,
+ List((classOf[ActorSystem], system)))
.recover {
case e =>
throw new ConfigurationException(
@@ -103,9 +105,10 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
// nothing specific here
}
- override protected def outboundTransportSink(outboundContext: OutboundContext,
- streamId: Int,
- bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = {
+ override protected def outboundTransportSink(
+ outboundContext: OutboundContext,
+ streamId: Int,
+ bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = {
implicit val sys: ActorSystem = system
val afr = createFlightRecorderEventSink()
@@ -117,15 +120,16 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
def connectionFlow: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] =
if (tlsEnabled) {
val sslProvider = sslEngineProvider.get
- Tcp().outgoingTlsConnectionWithSSLEngine(remoteAddress,
- createSSLEngine = () => sslProvider.createClientSSLEngine(host, port),
- connectTimeout = settings.Advanced.ConnectionTimeout,
- verifySession = session =>
- optionToTry(sslProvider.verifyClientSession(host, session)))
+ Tcp().outgoingTlsConnectionWithSSLEngine(
+ remoteAddress,
+ createSSLEngine = () => sslProvider.createClientSSLEngine(host, port),
+ connectTimeout = settings.Advanced.ConnectionTimeout,
+ verifySession = session => optionToTry(sslProvider.verifyClientSession(host, session)))
} else {
- Tcp().outgoingConnection(remoteAddress,
- halfClose = true, // issue https://github.com/akka/akka/issues/24392 if set to false
- connectTimeout = settings.Advanced.ConnectionTimeout)
+ Tcp().outgoingConnection(
+ remoteAddress,
+ halfClose = true, // issue https://github.com/akka/akka/issues/24392 if set to false
+ connectTimeout = settings.Advanced.ConnectionTimeout)
}
def connectionFlowWithRestart: Flow[ByteString, ByteString, NotUsed] = {
@@ -135,9 +139,10 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
Flow[ByteString]
.via(Flow.lazyInitAsync(() => {
// only open the actual connection if any new messages are sent
- afr.loFreq(TcpOutbound_Connected,
- s"${outboundContext.remoteAddress.host.get}:${outboundContext.remoteAddress.port.get} " +
- s"/ ${streamName(streamId)}")
+ afr.loFreq(
+ TcpOutbound_Connected,
+ s"${outboundContext.remoteAddress.host.get}:${outboundContext.remoteAddress.port.get} " +
+ s"/ ${streamName(streamId)}")
if (controlIdleKillSwitch.isDefined)
outboundContext.asInstanceOf[Association].setControlIdleKillSwitch(controlIdleKillSwitch)
Future.successful(
@@ -162,10 +167,11 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
// Restart of inner connection part important in control stream, since system messages
// are buffered and resent from the outer SystemMessageDelivery stage. No maxRestarts limit for control
// stream. For message stream it's best effort retry a few times.
- RestartFlow.withBackoff[ByteString, ByteString](settings.Advanced.OutboundRestartBackoff,
- settings.Advanced.OutboundRestartBackoff * 5,
- 0.1,
- maxRestarts)(flowFactory)
+ RestartFlow.withBackoff[ByteString, ByteString](
+ settings.Advanced.OutboundRestartBackoff,
+ settings.Advanced.OutboundRestartBackoff * 5,
+ 0.1,
+ maxRestarts)(flowFactory)
}
Flow[EnvelopeBuffer]
@@ -213,10 +219,11 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
if (largeMessageChannelEnabled)
runInboundLargeMessagesStream()
else
- (Flow[EnvelopeBuffer]
- .map(_ => log.warning("Dropping large message, missing large-message-destinations configuration."))
- .to(Sink.ignore),
- Promise[Done]().future) // never completed, not enabled
+ (
+ Flow[EnvelopeBuffer]
+ .map(_ => log.warning("Dropping large message, missing large-message-destinations configuration."))
+ .to(Sink.ignore),
+ Promise[Done]().future) // never completed, not enabled
}
// An inbound connection will only use one of the control, ordinary or large streams, but we have to
@@ -260,11 +267,11 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
val connectionSource: Source[Tcp.IncomingConnection, Future[ServerBinding]] =
if (tlsEnabled) {
val sslProvider = sslEngineProvider.get
- Tcp().bindTlsWithSSLEngine(interface = bindHost,
- port = bindPort,
- createSSLEngine = () => sslProvider.createServerSSLEngine(bindHost, bindPort),
- verifySession =
- session => optionToTry(sslProvider.verifyServerSession(bindHost, session)))
+ Tcp().bindTlsWithSSLEngine(
+ interface = bindHost,
+ port = bindPort,
+ createSSLEngine = () => sslProvider.createServerSSLEngine(bindHost, bindPort),
+ verifySession = session => optionToTry(sslProvider.verifyServerSession(bindHost, session)))
} else {
Tcp().bind(interface = bindHost, port = bindPort, halfClose = false)
}
@@ -274,8 +281,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
val afr = createFlightRecorderEventSink()
val binding = connectionSource
.to(Sink.foreach { connection =>
- afr.loFreq(TcpInbound_Connected,
- s"${connection.remoteAddress.getHostString}:${connection.remoteAddress.getPort}")
+ afr.loFreq(
+ TcpInbound_Connected,
+ s"${connection.remoteAddress.getHostString}:${connection.remoteAddress.getPort}")
connection.handleWith(inboundConnectionFlow)
})
.run()
@@ -364,9 +372,10 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
laneSource
.toMat(
Sink.fromGraph(
- new FixedSizePartitionHub[InboundEnvelope](inboundLanePartitioner,
- inboundLanes,
- settings.Advanced.InboundHubBufferSize)))({
+ new FixedSizePartitionHub[InboundEnvelope](
+ inboundLanePartitioner,
+ inboundLanes,
+ settings.Advanced.InboundHubBufferSize)))({
case ((a, b), c) => (a, b, c)
})
.run()(materializer)
@@ -417,8 +426,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
private def updateStreamMatValues(completed: Future[Done]): Unit = {
implicit val ec: ExecutionContext = materializer.executionContext
- updateStreamMatValues(ControlStreamId,
- InboundStreamMatValues[NotUsed](NotUsed, completed.recover { case _ => Done }))
+ updateStreamMatValues(
+ ControlStreamId,
+ InboundStreamMatValues[NotUsed](NotUsed, completed.recover { case _ => Done }))
}
override protected def shutdownTransport(): Future[Done] = {
@@ -438,8 +448,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem,
b <- binding
_ <- b.unbind()
} yield {
- topLevelFlightRecorder.loFreq(TcpInbound_Bound,
- s"${localAddress.address.host.get}:${localAddress.address.port}")
+ topLevelFlightRecorder.loFreq(
+ TcpInbound_Bound,
+ s"${localAddress.address.host.get}:${localAddress.address.port}")
Done
}
case None =>
diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala
index b39ce64c6a..bf74b15bcb 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala
@@ -68,8 +68,9 @@ class SslTransportException(message: String, cause: Throwable) extends RuntimeEx
extends SSLEngineProvider {
def this(system: ActorSystem) =
- this(system.settings.config.getConfig("akka.remote.artery.ssl.config-ssl-engine"),
- Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName))
+ this(
+ system.settings.config.getConfig("akka.remote.artery.ssl.config-ssl-engine"),
+ Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName))
val SSLKeyStore: String = config.getString("key-store")
val SSLTrustStore: String = config.getString("trust-store")
@@ -87,11 +88,12 @@ class SslTransportException(message: String, cause: Throwable) extends RuntimeEx
if (HostnameVerification)
log.debug("TLS/SSL hostname verification is enabled.")
else
- log.warning(LogMarker.Security,
- "TLS/SSL hostname verification is disabled. " +
- "Please configure akka.remote.artery.ssl.config-ssl-engine.hostname-verification=on " +
- "and ensure the X.509 certificate on the host is correct to remove this warning. " +
- "See Akka reference documentation for more information.")
+ log.warning(
+ LogMarker.Security,
+ "TLS/SSL hostname verification is disabled. " +
+ "Please configure akka.remote.artery.ssl.config-ssl-engine.hostname-verification=on " +
+ "and ensure the X.509 certificate on the host is correct to remove this warning. " +
+ "See Akka reference documentation for more information.")
constructContext()
}
@@ -233,9 +235,10 @@ object SSLEngineProviderSetup {
new SecureRandom
case unknown =>
- log.warning(LogMarker.Security,
- "Unknown SSL random number generator [{}] falling back to SecureRandom",
- unknown)
+ log.warning(
+ LogMarker.Security,
+ "Unknown SSL random number generator [{}] falling back to SecureRandom",
+ unknown)
new SecureRandom
}
rng.nextInt() // prevent stall on first access
diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala
index 574a76a37a..bb50dd7d3a 100644
--- a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala
+++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala
@@ -49,10 +49,11 @@ import akka.util.ByteString
* of the frame. The `frameLength` is encoded as 4 bytes (little endian).
*/
def encodeFrameHeader(frameLength: Int): ByteString =
- ByteString((frameLength & 0xff).toByte,
- ((frameLength & 0xff00) >> 8).toByte,
- ((frameLength & 0xff0000) >> 16).toByte,
- ((frameLength & 0xff000000) >> 24).toByte)
+ ByteString(
+ (frameLength & 0xff).toByte,
+ ((frameLength & 0xff00) >> 8).toByte,
+ ((frameLength & 0xff0000) >> 16).toByte,
+ ((frameLength & 0xff000000) >> 24).toByte)
}
/**
diff --git a/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala
index c6ce8a9748..77e9b28541 100644
--- a/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala
+++ b/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala
@@ -49,9 +49,10 @@ final case class RemoteRouterConfig(local: Pool, nodes: Iterable[Address]) exten
override def newRoutee(routeeProps: Props, context: ActorContext): Routee = {
val name = "c" + childNameCounter.incrementAndGet
- val deploy = Deploy(config = ConfigFactory.empty(),
- routerConfig = routeeProps.routerConfig,
- scope = RemoteScope(nodeAddressIter.next))
+ val deploy = Deploy(
+ config = ConfigFactory.empty(),
+ routerConfig = routeeProps.routerConfig,
+ scope = RemoteScope(nodeAddressIter.next))
// attachChild means that the provider will treat this call as if possibly done out of the wrong
// context and use RepointableActorRef instead of LocalActorRef. Seems like a slightly sub-optimal
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala
index d631f28f26..41de9f66da 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala
@@ -151,9 +151,10 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste
builder.build
}
- def deserializeCompressionAdvertisement[T, U](bytes: Array[Byte],
- keyDeserializer: String => T,
- create: (UniqueAddress, CompressionTable[T]) => U): U = {
+ def deserializeCompressionAdvertisement[T, U](
+ bytes: Array[Byte],
+ keyDeserializer: String => T,
+ create: (UniqueAddress, CompressionTable[T]) => U): U = {
val protoAdv = ArteryControlFormats.CompressionTableAdvertisement.parseFrom(bytes)
val kvs =
@@ -171,8 +172,9 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste
.setVersion(version)
.build()
- def deserializeCompressionTableAdvertisementAck(bytes: Array[Byte],
- create: (UniqueAddress, Byte) => AnyRef): AnyRef = {
+ def deserializeCompressionTableAdvertisementAck(
+ bytes: Array[Byte],
+ create: (UniqueAddress, Byte) => AnyRef): AnyRef = {
val msg = ArteryControlFormats.CompressionTableAdvertisementAck.parseFrom(bytes)
create(deserializeUniqueAddress(msg.getFrom), msg.getVersion.toByte)
}
@@ -197,16 +199,18 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste
SystemMessageDelivery.SystemMessageEnvelope(
serialization
- .deserialize(protoEnv.getMessage.toByteArray,
- protoEnv.getSerializerId,
- if (protoEnv.hasMessageManifest) protoEnv.getMessageManifest.toStringUtf8 else "")
+ .deserialize(
+ protoEnv.getMessage.toByteArray,
+ protoEnv.getSerializerId,
+ if (protoEnv.hasMessageManifest) protoEnv.getMessageManifest.toStringUtf8 else "")
.get,
protoEnv.getSeqNo,
deserializeUniqueAddress(protoEnv.getAckReplyTo))
}
- def serializeSystemMessageDeliveryAck(seqNo: Long,
- from: UniqueAddress): ArteryControlFormats.SystemMessageDeliveryAck =
+ def serializeSystemMessageDeliveryAck(
+ seqNo: Long,
+ from: UniqueAddress): ArteryControlFormats.SystemMessageDeliveryAck =
ArteryControlFormats.SystemMessageDeliveryAck.newBuilder.setSeqNo(seqNo).setFrom(serializeUniqueAddress(from)).build
def deserializeSystemMessageDeliveryAck(bytes: Array[Byte], create: (Long, UniqueAddress) => AnyRef): AnyRef = {
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala
index 8276b5e6bc..be1f438a8a 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala
@@ -104,9 +104,10 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys
if (protoDeploy.hasConfig) {
if (protoDeploy.hasConfigSerializerId) {
serialization
- .deserialize(protoDeploy.getConfig.toByteArray,
- protoDeploy.getConfigSerializerId,
- protoDeploy.getConfigManifest)
+ .deserialize(
+ protoDeploy.getConfig.toByteArray,
+ protoDeploy.getConfigSerializerId,
+ protoDeploy.getConfigManifest)
.get
.asInstanceOf[Config]
} else {
@@ -119,9 +120,10 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys
if (protoDeploy.hasRouterConfig) {
if (protoDeploy.hasRouterConfigSerializerId) {
serialization
- .deserialize(protoDeploy.getRouterConfig.toByteArray,
- protoDeploy.getRouterConfigSerializerId,
- protoDeploy.getRouterConfigManifest)
+ .deserialize(
+ protoDeploy.getRouterConfig.toByteArray,
+ protoDeploy.getRouterConfigSerializerId,
+ protoDeploy.getRouterConfigManifest)
.get
.asInstanceOf[RouterConfig]
} else {
@@ -134,9 +136,10 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys
if (protoDeploy.hasScope) {
if (protoDeploy.hasScopeSerializerId) {
serialization
- .deserialize(protoDeploy.getScope.toByteArray,
- protoDeploy.getScopeSerializerId,
- protoDeploy.getScopeManifest)
+ .deserialize(
+ protoDeploy.getScope.toByteArray,
+ protoDeploy.getScopeSerializerId,
+ protoDeploy.getScopeManifest)
.get
.asInstanceOf[Scope]
} else {
@@ -179,10 +182,11 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys
Props(deploy(proto.getProps.getDeploy), actorClass, args)
}
- DaemonMsgCreate(props = props,
- deploy = deploy(proto.getDeploy),
- path = proto.getPath,
- supervisor = deserializeActorRef(system, proto.getSupervisor))
+ DaemonMsgCreate(
+ props = props,
+ deploy = deploy(proto.getDeploy),
+ path = proto.getPath,
+ supervisor = deserializeActorRef(system, proto.getSupervisor))
}
private def serialize(any: Any): (Int, Boolean, String, Array[Byte]) = {
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala
index 77cf973cc2..7c92302f86 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala
@@ -53,8 +53,9 @@ class MessageContainerSerializer(val system: ExtendedActorSystem) extends BaseSe
builder.build().toByteArray
}
- private def buildPattern(matcher: Option[String],
- tpe: ContainerFormats.PatternType): ContainerFormats.Selection.Builder = {
+ private def buildPattern(
+ matcher: Option[String],
+ tpe: ContainerFormats.PatternType): ContainerFormats.Selection.Builder = {
val builder = ContainerFormats.Selection.newBuilder().setType(tpe)
matcher.foreach(builder.setMatcher)
builder
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala
index 19ee710231..f0dd1da1f7 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala
@@ -225,10 +225,11 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
builder.build().toByteArray
}
- private def buildGenericRoutingPool(nrOfInstances: Int,
- routerDispatcher: String,
- usePoolDispatcher: Boolean,
- resizer: Option[Resizer]): WireFormats.GenericRoutingPool = {
+ private def buildGenericRoutingPool(
+ nrOfInstances: Int,
+ routerDispatcher: String,
+ usePoolDispatcher: Boolean,
+ resizer: Option[Resizer]): WireFormats.GenericRoutingPool = {
val builder = WireFormats.GenericRoutingPool.newBuilder()
builder.setNrOfInstances(nrOfInstances)
if (routerDispatcher != Dispatchers.DefaultDispatcherId) {
@@ -299,36 +300,36 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
private val TailChoppingPoolManifest = "ROTCP"
private val RemoteRouterConfigManifest = "RORRC"
- private val fromBinaryMap = Map[String, Array[Byte] => AnyRef](IdentifyManifest -> deserializeIdentify,
- ActorIdentityManifest -> deserializeActorIdentity,
- StatusSuccessManifest -> deserializeStatusSuccess,
- StatusFailureManifest -> deserializeStatusFailure,
- ThrowableManifest -> throwableSupport.deserializeThrowable,
- ActorRefManifest -> deserializeActorRefBytes,
- OptionManifest -> deserializeOption,
- OptionalManifest -> deserializeOptional,
- PoisonPillManifest -> ((_) => PoisonPill),
- KillManifest -> ((_) => Kill),
- RemoteWatcherHBManifest -> ((_) =>
- RemoteWatcher.Heartbeat),
- DoneManifest -> ((_) => Done),
- NotUsedManifest -> ((_) => NotUsed),
- AddressManifest -> deserializeAddressData,
- UniqueAddressManifest -> deserializeUniqueAddress,
- RemoteWatcherHBRespManifest -> deserializeHeartbeatRsp,
- ActorInitializationExceptionManifest -> deserializeActorInitializationException,
- LocalScopeManifest -> ((_) => LocalScope),
- RemoteScopeManifest -> deserializeRemoteScope,
- ConfigManifest -> deserializeConfig,
- FromConfigManifest -> deserializeFromConfig,
- DefaultResizerManifest -> deserializeDefaultResizer,
- BalancingPoolManifest -> deserializeBalancingPool,
- BroadcastPoolManifest -> deserializeBroadcastPool,
- RandomPoolManifest -> deserializeRandomPool,
- RoundRobinPoolManifest -> deserializeRoundRobinPool,
- ScatterGatherPoolManifest -> deserializeScatterGatherPool,
- TailChoppingPoolManifest -> deserializeTailChoppingPool,
- RemoteRouterConfigManifest -> deserializeRemoteRouterConfig)
+ private val fromBinaryMap = Map[String, Array[Byte] => AnyRef](
+ IdentifyManifest -> deserializeIdentify,
+ ActorIdentityManifest -> deserializeActorIdentity,
+ StatusSuccessManifest -> deserializeStatusSuccess,
+ StatusFailureManifest -> deserializeStatusFailure,
+ ThrowableManifest -> throwableSupport.deserializeThrowable,
+ ActorRefManifest -> deserializeActorRefBytes,
+ OptionManifest -> deserializeOption,
+ OptionalManifest -> deserializeOptional,
+ PoisonPillManifest -> ((_) => PoisonPill),
+ KillManifest -> ((_) => Kill),
+ RemoteWatcherHBManifest -> ((_) => RemoteWatcher.Heartbeat),
+ DoneManifest -> ((_) => Done),
+ NotUsedManifest -> ((_) => NotUsed),
+ AddressManifest -> deserializeAddressData,
+ UniqueAddressManifest -> deserializeUniqueAddress,
+ RemoteWatcherHBRespManifest -> deserializeHeartbeatRsp,
+ ActorInitializationExceptionManifest -> deserializeActorInitializationException,
+ LocalScopeManifest -> ((_) => LocalScope),
+ RemoteScopeManifest -> deserializeRemoteScope,
+ ConfigManifest -> deserializeConfig,
+ FromConfigManifest -> deserializeFromConfig,
+ DefaultResizerManifest -> deserializeDefaultResizer,
+ BalancingPoolManifest -> deserializeBalancingPool,
+ BroadcastPoolManifest -> deserializeBroadcastPool,
+ RandomPoolManifest -> deserializeRandomPool,
+ RoundRobinPoolManifest -> deserializeRoundRobinPool,
+ ScatterGatherPoolManifest -> deserializeScatterGatherPool,
+ TailChoppingPoolManifest -> deserializeTailChoppingPool,
+ RemoteRouterConfigManifest -> deserializeRemoteRouterConfig)
override def manifest(o: AnyRef): String =
o match {
@@ -424,18 +425,20 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
addressFromDataProto(WireFormats.AddressData.parseFrom(bytes))
private def addressFromDataProto(a: WireFormats.AddressData): Address = {
- Address(a.getProtocol,
- a.getSystem,
- // technically the presence of hostname and port are guaranteed, see our serializeAddressData
- if (a.hasHostname) Some(a.getHostname) else None,
- if (a.hasPort) Some(a.getPort) else None)
+ Address(
+ a.getProtocol,
+ a.getSystem,
+ // technically the presence of hostname and port are guaranteed, see our serializeAddressData
+ if (a.hasHostname) Some(a.getHostname) else None,
+ if (a.hasPort) Some(a.getPort) else None)
}
private def addressFromProto(a: ArteryControlFormats.Address): Address = {
- Address(a.getProtocol,
- a.getSystem,
- // technically the presence of hostname and port are guaranteed, see our serializeAddressData
- if (a.hasHostname) Some(a.getHostname) else None,
- if (a.hasPort) Some(a.getPort) else None)
+ Address(
+ a.getProtocol,
+ a.getSystem,
+ // technically the presence of hostname and port are guaranteed, see our serializeAddressData
+ if (a.hasHostname) Some(a.getHostname) else None,
+ if (a.hasPort) Some(a.getPort) else None)
}
private def deserializeUniqueAddress(bytes: Array[Byte]): UniqueAddress = {
@@ -457,9 +460,10 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
if (message.startsWith(refString)) message.drop(refString.length + 2)
else message
- ActorInitializationException(if (serializedEx.hasActor) ref else null,
- reconstructedMessage,
- payloadSupport.deserializePayload(serializedEx.getCause).asInstanceOf[Throwable])
+ ActorInitializationException(
+ if (serializedEx.hasActor) ref else null,
+ reconstructedMessage,
+ payloadSupport.deserializePayload(serializedEx.getCause).asInstanceOf[Throwable])
}
private def deserializeRemoteScope(bytes: Array[Byte]): RemoteScope = {
@@ -484,90 +488,91 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
private def deserializeBalancingPool(bytes: Array[Byte]): BalancingPool = {
val bp = WireFormats.GenericRoutingPool.parseFrom(bytes)
- BalancingPool(nrOfInstances = bp.getNrOfInstances,
- routerDispatcher =
- if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId)
+ BalancingPool(
+ nrOfInstances = bp.getNrOfInstances,
+ routerDispatcher = if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId)
}
private def deserializeBroadcastPool(bytes: Array[Byte]): BroadcastPool = {
val bp = WireFormats.GenericRoutingPool.parseFrom(bytes)
- BroadcastPool(nrOfInstances = bp.getNrOfInstances,
- resizer =
- if (bp.hasResizer) Some(payloadSupport.deserializePayload(bp.getResizer).asInstanceOf[Resizer])
- else None,
- routerDispatcher =
- if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId,
- usePoolDispatcher = bp.getUsePoolDispatcher)
+ BroadcastPool(
+ nrOfInstances = bp.getNrOfInstances,
+ resizer =
+ if (bp.hasResizer) Some(payloadSupport.deserializePayload(bp.getResizer).asInstanceOf[Resizer])
+ else None,
+ routerDispatcher = if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId,
+ usePoolDispatcher = bp.getUsePoolDispatcher)
}
private def deserializeRandomPool(bytes: Array[Byte]): RandomPool = {
val rp = WireFormats.GenericRoutingPool.parseFrom(bytes)
- RandomPool(nrOfInstances = rp.getNrOfInstances,
- resizer =
- if (rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer])
- else None,
- routerDispatcher =
- if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId,
- usePoolDispatcher = rp.getUsePoolDispatcher)
+ RandomPool(
+ nrOfInstances = rp.getNrOfInstances,
+ resizer =
+ if (rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer])
+ else None,
+ routerDispatcher = if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId,
+ usePoolDispatcher = rp.getUsePoolDispatcher)
}
private def deserializeRoundRobinPool(bytes: Array[Byte]): RoundRobinPool = {
val rp = WireFormats.GenericRoutingPool.parseFrom(bytes)
- RoundRobinPool(nrOfInstances = rp.getNrOfInstances,
- resizer =
- if (rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer])
- else None,
- routerDispatcher =
- if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId,
- usePoolDispatcher = rp.getUsePoolDispatcher)
+ RoundRobinPool(
+ nrOfInstances = rp.getNrOfInstances,
+ resizer =
+ if (rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer])
+ else None,
+ routerDispatcher = if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId,
+ usePoolDispatcher = rp.getUsePoolDispatcher)
}
private def deserializeScatterGatherPool(bytes: Array[Byte]): ScatterGatherFirstCompletedPool = {
val sgp = WireFormats.ScatterGatherPool.parseFrom(bytes)
- ScatterGatherFirstCompletedPool(nrOfInstances = sgp.getGeneric.getNrOfInstances,
- resizer =
- if (sgp.getGeneric.hasResizer)
- Some(
- payloadSupport
- .deserializePayload(sgp.getGeneric.getResizer)
- .asInstanceOf[Resizer])
- else None,
- within = deserializeFiniteDuration(sgp.getWithin),
- routerDispatcher =
- if (sgp.getGeneric.hasRouterDispatcher) sgp.getGeneric.getRouterDispatcher
- else Dispatchers.DefaultDispatcherId)
+ ScatterGatherFirstCompletedPool(
+ nrOfInstances = sgp.getGeneric.getNrOfInstances,
+ resizer =
+ if (sgp.getGeneric.hasResizer)
+ Some(payloadSupport.deserializePayload(sgp.getGeneric.getResizer).asInstanceOf[Resizer])
+ else None,
+ within = deserializeFiniteDuration(sgp.getWithin),
+ routerDispatcher =
+ if (sgp.getGeneric.hasRouterDispatcher) sgp.getGeneric.getRouterDispatcher
+ else Dispatchers.DefaultDispatcherId)
}
private def deserializeTailChoppingPool(bytes: Array[Byte]): TailChoppingPool = {
val tcp = WireFormats.TailChoppingPool.parseFrom(bytes)
- TailChoppingPool(nrOfInstances = tcp.getGeneric.getNrOfInstances,
- resizer =
- if (tcp.getGeneric.hasResizer)
- Some(payloadSupport.deserializePayload(tcp.getGeneric.getResizer).asInstanceOf[Resizer])
- else None,
- routerDispatcher =
- if (tcp.getGeneric.hasRouterDispatcher) tcp.getGeneric.getRouterDispatcher
- else Dispatchers.DefaultDispatcherId,
- usePoolDispatcher = tcp.getGeneric.getUsePoolDispatcher,
- within = deserializeFiniteDuration(tcp.getWithin),
- interval = deserializeFiniteDuration(tcp.getInterval))
+ TailChoppingPool(
+ nrOfInstances = tcp.getGeneric.getNrOfInstances,
+ resizer =
+ if (tcp.getGeneric.hasResizer)
+ Some(payloadSupport.deserializePayload(tcp.getGeneric.getResizer).asInstanceOf[Resizer])
+ else None,
+ routerDispatcher =
+ if (tcp.getGeneric.hasRouterDispatcher) tcp.getGeneric.getRouterDispatcher
+ else Dispatchers.DefaultDispatcherId,
+ usePoolDispatcher = tcp.getGeneric.getUsePoolDispatcher,
+ within = deserializeFiniteDuration(tcp.getWithin),
+ interval = deserializeFiniteDuration(tcp.getInterval))
}
private def deserializeRemoteRouterConfig(bytes: Array[Byte]): RemoteRouterConfig = {
val rrc = WireFormats.RemoteRouterConfig.parseFrom(bytes)
- RemoteRouterConfig(local = payloadSupport.deserializePayload(rrc.getLocal).asInstanceOf[Pool],
- nodes = rrc.getNodesList.asScala.map(deserializeAddressData))
+ RemoteRouterConfig(
+ local = payloadSupport.deserializePayload(rrc.getLocal).asInstanceOf[Pool],
+ nodes = rrc.getNodesList.asScala.map(deserializeAddressData))
}
private def deserializeDefaultResizer(bytes: Array[Byte]): DefaultResizer = {
val dr = WireFormats.DefaultResizer.parseFrom(bytes)
- DefaultResizer(lowerBound = dr.getLowerBound,
- upperBound = dr.getUpperBound,
- pressureThreshold = dr.getPressureThreshold,
- rampupRate = dr.getRampupRate,
- backoffThreshold = dr.getBackoffThreshold,
- backoffRate = dr.getBackoffRate,
- messagesPerResize = dr.getMessagesPerResize)
+ DefaultResizer(
+ lowerBound = dr.getLowerBound,
+ upperBound = dr.getUpperBound,
+ pressureThreshold = dr.getPressureThreshold,
+ rampupRate = dr.getRampupRate,
+ backoffThreshold = dr.getBackoffThreshold,
+ backoffRate = dr.getBackoffRate,
+ messagesPerResize = dr.getMessagesPerResize)
}
private def deserializeTimeUnit(unit: WireFormats.TimeUnit): TimeUnit = unit match {
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala
index 80bae8cafa..6b87968206 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala
@@ -57,8 +57,9 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer
val unCachedParsingMethod =
if (method eq null) clazz.getDeclaredMethod("parseFrom", ProtobufSerializer.ARRAY_OF_BYTE_ARRAY: _*)
else method
- if (parsingMethodBindingRef.compareAndSet(parsingMethodBinding,
- parsingMethodBinding.updated(clazz, unCachedParsingMethod)))
+ if (parsingMethodBindingRef.compareAndSet(
+ parsingMethodBinding,
+ parsingMethodBinding.updated(clazz, unCachedParsingMethod)))
unCachedParsingMethod
else
parsingMethod(unCachedParsingMethod)
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala
index 71bc494db2..83e9e53484 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala
@@ -120,22 +120,26 @@ class SystemMessageSerializer(val system: ExtendedActorSystem) extends BaseSeria
Supervise(deserializeActorRef(sysmsg.getSuperviseData.getChild), sysmsg.getSuperviseData.getAsync)
case WATCH =>
- Watch(deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef],
- deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef])
+ Watch(
+ deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef],
+ deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef])
case UNWATCH =>
- Unwatch(deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef],
- deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef])
+ Unwatch(
+ deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef],
+ deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef])
case FAILED =>
- Failed(deserializeActorRef(sysmsg.getFailedData.getChild),
- getCauseThrowable(sysmsg),
- sysmsg.getFailedData.getUid.toInt)
+ Failed(
+ deserializeActorRef(sysmsg.getFailedData.getChild),
+ getCauseThrowable(sysmsg),
+ sysmsg.getFailedData.getUid.toInt)
case DEATHWATCH_NOTIFICATION =>
- DeathWatchNotification(deserializeActorRef(sysmsg.getDwNotificationData.getActor),
- sysmsg.getDwNotificationData.getExistenceConfirmed,
- sysmsg.getDwNotificationData.getAddressTerminated)
+ DeathWatchNotification(
+ deserializeActorRef(sysmsg.getDwNotificationData.getActor),
+ sysmsg.getDwNotificationData.getExistenceConfirmed,
+ sysmsg.getDwNotificationData.getAddressTerminated)
}
private def serializeThrowable(throwable: Throwable): ContainerFormats.Payload.Builder = {
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala b/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala
index 4899047bbc..c08935cda1 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala
@@ -57,8 +57,9 @@ private[akka] class ThrowableSupport(system: ExtendedActorSystem) {
if (protoT.hasCause) {
val cause = payloadSupport.deserializePayload(protoT.getCause).asInstanceOf[Throwable]
system.dynamicAccess
- .createInstanceFor[Throwable](protoT.getClassName,
- List(classOf[String] -> protoT.getMessage, classOf[Throwable] -> cause))
+ .createInstanceFor[Throwable](
+ protoT.getClassName,
+ List(classOf[String] -> protoT.getMessage, classOf[Throwable] -> cause))
.get
} else {
// Important security note: before creating an instance of from the class name we
@@ -73,10 +74,11 @@ private[akka] class ThrowableSupport(system: ExtendedActorSystem) {
val stackTrace =
protoT.getStackTraceList.asScala.map { elem =>
val fileName = elem.getFileName
- new StackTraceElement(elem.getClassName,
- elem.getMethodName,
- if (fileName.length > 0) fileName else null,
- elem.getLineNumber)
+ new StackTraceElement(
+ elem.getClassName,
+ elem.getMethodName,
+ if (fileName.length > 0) fileName else null,
+ elem.getLineNumber)
}.toArray
t.setStackTrace(stackTrace)
t
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
index 25b3e29c34..1b9a6109bb 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
@@ -75,8 +75,9 @@ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transpor
protected def maximumOverhead: Int
- protected def interceptListen(listenAddress: Address,
- listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener]
+ protected def interceptListen(
+ listenAddress: Address,
+ listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener]
protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit
@@ -123,10 +124,11 @@ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transpor
}
-abstract class AbstractTransportAdapterHandle(val originalLocalAddress: Address,
- val originalRemoteAddress: Address,
- val wrappedHandle: AssociationHandle,
- val addedSchemeIdentifier: String)
+abstract class AbstractTransportAdapterHandle(
+ val originalLocalAddress: Address,
+ val originalRemoteAddress: Address,
+ val wrappedHandle: AssociationHandle,
+ val addedSchemeIdentifier: String)
extends AssociationHandle
with SchemeAugmenter {
@@ -166,8 +168,9 @@ abstract class ActorTransportAdapter(wrappedTransport: Transport, system: ActorS
private def registerManager(): Future[ActorRef] =
(system.actorSelection("/system/transports") ? RegisterTransportActor(managerProps, managerName)).mapTo[ActorRef]
- override def interceptListen(listenAddress: Address,
- listenerPromise: Future[AssociationEventListener]): Future[AssociationEventListener] = {
+ override def interceptListen(
+ listenAddress: Address,
+ listenerPromise: Future[AssociationEventListener]): Future[AssociationEventListener] = {
registerManager().map { mgr =>
// Side effecting: storing the manager instance in volatile var
// This is done only once: during the initialization of the protocol stack. The variable manager is not read
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
index 5643f9b4c0..c7e206a2e7 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
@@ -36,11 +36,12 @@ private[remote] object AkkaPduCodec {
case object Heartbeat extends AkkaPdu
final case class Payload(bytes: ByteString) extends AkkaPdu
- final case class Message(recipient: InternalActorRef,
- recipientAddress: Address,
- serializedMessage: SerializedMessage,
- senderOption: OptionVal[ActorRef],
- seqOption: Option[SeqNo])
+ final case class Message(
+ recipient: InternalActorRef,
+ recipientAddress: Address,
+ serializedMessage: SerializedMessage,
+ senderOption: OptionVal[ActorRef],
+ seqOption: Option[SeqNo])
extends HasSequenceNumber {
def reliableDeliveryEnabled = seqOption.isDefined
@@ -94,16 +95,18 @@ private[remote] trait AkkaPduCodec {
def constructHeartbeat: ByteString
- def decodeMessage(raw: ByteString,
- provider: RemoteActorRefProvider,
- localAddress: Address): (Option[Ack], Option[Message])
+ def decodeMessage(
+ raw: ByteString,
+ provider: RemoteActorRefProvider,
+ localAddress: Address): (Option[Ack], Option[Message])
- def constructMessage(localAddress: Address,
- recipient: ActorRef,
- serializedMessage: SerializedMessage,
- senderOption: OptionVal[ActorRef],
- seqOption: Option[SeqNo] = None,
- ackOption: Option[Ack] = None): ByteString
+ def constructMessage(
+ localAddress: Address,
+ recipient: ActorRef,
+ serializedMessage: SerializedMessage,
+ senderOption: OptionVal[ActorRef],
+ seqOption: Option[SeqNo] = None,
+ ackOption: Option[Ack] = None): ByteString
def constructPureAck(ack: Ack): ByteString
}
@@ -123,12 +126,13 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
ackBuilder
}
- override def constructMessage(localAddress: Address,
- recipient: ActorRef,
- serializedMessage: SerializedMessage,
- senderOption: OptionVal[ActorRef],
- seqOption: Option[SeqNo] = None,
- ackOption: Option[Ack] = None): ByteString = {
+ override def constructMessage(
+ localAddress: Address,
+ recipient: ActorRef,
+ serializedMessage: SerializedMessage,
+ senderOption: OptionVal[ActorRef],
+ seqOption: Option[SeqNo] = None,
+ ackOption: Option[Ack] = None): ByteString = {
val ackAndEnvelopeBuilder = AckAndEnvelopeContainer.newBuilder
@@ -196,29 +200,32 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
}
}
- override def decodeMessage(raw: ByteString,
- provider: RemoteActorRefProvider,
- localAddress: Address): (Option[Ack], Option[Message]) = {
+ override def decodeMessage(
+ raw: ByteString,
+ provider: RemoteActorRefProvider,
+ localAddress: Address): (Option[Ack], Option[Message]) = {
val ackAndEnvelope = AckAndEnvelopeContainer.parseFrom(raw.toArray)
val ackOption = if (ackAndEnvelope.hasAck) {
import scala.collection.JavaConverters._
Some(
- Ack(SeqNo(ackAndEnvelope.getAck.getCumulativeAck),
- ackAndEnvelope.getAck.getNacksList.asScala.map(SeqNo(_)).toSet))
+ Ack(
+ SeqNo(ackAndEnvelope.getAck.getCumulativeAck),
+ ackAndEnvelope.getAck.getNacksList.asScala.map(SeqNo(_)).toSet))
} else None
val messageOption = if (ackAndEnvelope.hasEnvelope) {
val msgPdu = ackAndEnvelope.getEnvelope
Some(
- Message(recipient = provider.resolveActorRefWithLocalAddress(msgPdu.getRecipient.getPath, localAddress),
- recipientAddress = AddressFromURIString(msgPdu.getRecipient.getPath),
- serializedMessage = msgPdu.getMessage,
- senderOption =
- if (msgPdu.hasSender)
- OptionVal(provider.resolveActorRefWithLocalAddress(msgPdu.getSender.getPath, localAddress))
- else OptionVal.None,
- seqOption = if (msgPdu.hasSeq) Some(SeqNo(msgPdu.getSeq)) else None))
+ Message(
+ recipient = provider.resolveActorRefWithLocalAddress(msgPdu.getRecipient.getPath, localAddress),
+ recipientAddress = AddressFromURIString(msgPdu.getRecipient.getPath),
+ serializedMessage = msgPdu.getMessage,
+ senderOption =
+ if (msgPdu.hasSender)
+ OptionVal(provider.resolveActorRefWithLocalAddress(msgPdu.getSender.getPath, localAddress))
+ else OptionVal.None,
+ seqOption = if (msgPdu.hasSeq) Some(SeqNo(msgPdu.getSeq)) else None))
} else None
(ackOption, messageOption)
@@ -247,8 +254,9 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
private def decodeAddress(encodedAddress: AddressData): Address =
Address(encodedAddress.getProtocol, encodedAddress.getSystem, encodedAddress.getHostname, encodedAddress.getPort)
- private def constructControlMessagePdu(code: WireFormats.CommandType,
- handshakeInfo: Option[AkkaHandshakeInfo.Builder]): ByteString = {
+ private def constructControlMessagePdu(
+ code: WireFormats.CommandType,
+ handshakeInfo: Option[AkkaHandshakeInfo.Builder]): ByteString = {
val controlMessageBuilder = AkkaControlMessage.newBuilder()
controlMessageBuilder.setCommandType(code)
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
index 2c1ffc4002..ae20e601d2 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
@@ -67,9 +67,10 @@ private[remote] object AkkaProtocolTransport { //Couldn't these go into the Remo
val AkkaOverhead: Int = 0 //Don't know yet
val UniqueId = new java.util.concurrent.atomic.AtomicInteger(0)
- final case class AssociateUnderlyingRefuseUid(remoteAddress: Address,
- statusPromise: Promise[AssociationHandle],
- refuseUid: Option[Int])
+ final case class AssociateUnderlyingRefuseUid(
+ remoteAddress: Address,
+ statusPromise: Promise[AssociationHandle],
+ refuseUid: Option[Int])
extends NoSerializationVerificationNeeded
}
@@ -98,10 +99,11 @@ final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String]
* @param codec
* the codec that will be used to encode/decode Akka PDUs
*/
-private[remote] class AkkaProtocolTransport(wrappedTransport: Transport,
- private val system: ActorSystem,
- private val settings: AkkaProtocolSettings,
- private val codec: AkkaPduCodec)
+private[remote] class AkkaProtocolTransport(
+ wrappedTransport: Transport,
+ private val system: ActorSystem,
+ private val settings: AkkaProtocolSettings,
+ private val codec: AkkaPduCodec)
extends ActorTransportAdapter(wrappedTransport, system) {
override val addedSchemeIdentifier: String = AkkaScheme
@@ -126,8 +128,9 @@ private[remote] class AkkaProtocolTransport(wrappedTransport: Transport,
}
}
-private[transport] class AkkaProtocolManager(private val wrappedTransport: Transport,
- private val settings: AkkaProtocolSettings)
+private[transport] class AkkaProtocolManager(
+ private val wrappedTransport: Transport,
+ private val settings: AkkaProtocolSettings)
extends ActorTransportAdapterManager {
// The AkkaProtocolTransport does not handle the recovery of associations, this task is implemented in the
@@ -148,9 +151,10 @@ private[transport] class AkkaProtocolManager(private val wrappedTransport: Trans
context.actorOf(
RARP(context.system).configureDispatcher(
ProtocolStateActor.inboundProps(
- HandshakeInfo(stateActorLocalAddress,
- AddressUidExtension(context.system).addressUid,
- stateActorSettings.SecureCookie),
+ HandshakeInfo(
+ stateActorLocalAddress,
+ AddressUidExtension(context.system).addressUid,
+ stateActorSettings.SecureCookie),
handle,
stateActorAssociationHandler,
stateActorSettings,
@@ -165,9 +169,10 @@ private[transport] class AkkaProtocolManager(private val wrappedTransport: Trans
}
- private def createOutboundStateActor(remoteAddress: Address,
- statusPromise: Promise[AssociationHandle],
- refuseUid: Option[Int]): Unit = {
+ private def createOutboundStateActor(
+ remoteAddress: Address,
+ statusPromise: Promise[AssociationHandle],
+ refuseUid: Option[Int]): Unit = {
val stateActorLocalAddress = localAddress
val stateActorSettings = settings
@@ -176,9 +181,10 @@ private[transport] class AkkaProtocolManager(private val wrappedTransport: Trans
context.actorOf(
RARP(context.system).configureDispatcher(
ProtocolStateActor.outboundProps(
- HandshakeInfo(stateActorLocalAddress,
- AddressUidExtension(context.system).addressUid,
- stateActorSettings.SecureCookie),
+ HandshakeInfo(
+ stateActorLocalAddress,
+ AddressUidExtension(context.system).addressUid,
+ stateActorSettings.SecureCookie),
remoteAddress,
statusPromise,
stateActorWrappedTransport,
@@ -194,13 +200,14 @@ private[transport] class AkkaProtocolManager(private val wrappedTransport: Trans
}
-private[remote] class AkkaProtocolHandle(_localAddress: Address,
- _remoteAddress: Address,
- val readHandlerPromise: Promise[HandleEventListener],
- _wrappedHandle: AssociationHandle,
- val handshakeInfo: HandshakeInfo,
- private val stateActor: ActorRef,
- private val codec: AkkaPduCodec)
+private[remote] class AkkaProtocolHandle(
+ _localAddress: Address,
+ _remoteAddress: Address,
+ val readHandlerPromise: Promise[HandleEventListener],
+ _wrappedHandle: AssociationHandle,
+ val handshakeInfo: HandshakeInfo,
+ private val stateActor: ActorRef,
+ private val codec: AkkaPduCodec)
extends AbstractTransportAdapterHandle(_localAddress, _remoteAddress, _wrappedHandle, AkkaScheme) {
override def write(payload: ByteString): Boolean = wrappedHandle.write(codec.constructPayload(payload))
@@ -246,14 +253,16 @@ private[transport] object ProtocolStateActor {
trait InitialProtocolStateData extends ProtocolStateData
// Neither the underlying, nor the provided transport is associated
- final case class OutboundUnassociated(remoteAddress: Address,
- statusPromise: Promise[AssociationHandle],
- transport: Transport)
+ final case class OutboundUnassociated(
+ remoteAddress: Address,
+ statusPromise: Promise[AssociationHandle],
+ transport: Transport)
extends InitialProtocolStateData
// The underlying transport is associated, but the handshake of the akka protocol is not yet finished
- final case class OutboundUnderlyingAssociated(statusPromise: Promise[AssociationHandle],
- wrappedHandle: AssociationHandle)
+ final case class OutboundUnderlyingAssociated(
+ statusPromise: Promise[AssociationHandle],
+ wrappedHandle: AssociationHandle)
extends ProtocolStateData
// The underlying transport is associated, but the handshake of the akka protocol is not yet finished
@@ -261,9 +270,10 @@ private[transport] object ProtocolStateActor {
extends InitialProtocolStateData
// Both transports are associated, but the handler for the handle has not yet been provided
- final case class AssociatedWaitHandler(handleListener: Future[HandleEventListener],
- wrappedHandle: AssociationHandle,
- queue: immutable.Queue[ByteString])
+ final case class AssociatedWaitHandler(
+ handleListener: Future[HandleEventListener],
+ wrappedHandle: AssociationHandle,
+ queue: immutable.Queue[ByteString])
extends ProtocolStateData
final case class ListenerReady(listener: HandleEventListener, wrappedHandle: AssociationHandle)
@@ -272,45 +282,50 @@ private[transport] object ProtocolStateActor {
case class TimeoutReason(errorMessage: String)
case object ForbiddenUidReason
- private[remote] def outboundProps(handshakeInfo: HandshakeInfo,
- remoteAddress: Address,
- statusPromise: Promise[AssociationHandle],
- transport: Transport,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
- failureDetector: FailureDetector,
- refuseUid: Option[Int]): Props =
- Props(classOf[ProtocolStateActor],
- handshakeInfo,
- remoteAddress,
- statusPromise,
- transport,
- settings,
- codec,
- failureDetector,
- refuseUid).withDeploy(Deploy.local)
+ private[remote] def outboundProps(
+ handshakeInfo: HandshakeInfo,
+ remoteAddress: Address,
+ statusPromise: Promise[AssociationHandle],
+ transport: Transport,
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
+ failureDetector: FailureDetector,
+ refuseUid: Option[Int]): Props =
+ Props(
+ classOf[ProtocolStateActor],
+ handshakeInfo,
+ remoteAddress,
+ statusPromise,
+ transport,
+ settings,
+ codec,
+ failureDetector,
+ refuseUid).withDeploy(Deploy.local)
- private[remote] def inboundProps(handshakeInfo: HandshakeInfo,
- wrappedHandle: AssociationHandle,
- associationListener: AssociationEventListener,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
- failureDetector: FailureDetector): Props =
- Props(classOf[ProtocolStateActor],
- handshakeInfo,
- wrappedHandle,
- associationListener,
- settings,
- codec,
- failureDetector).withDeploy(Deploy.local)
+ private[remote] def inboundProps(
+ handshakeInfo: HandshakeInfo,
+ wrappedHandle: AssociationHandle,
+ associationListener: AssociationEventListener,
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
+ failureDetector: FailureDetector): Props =
+ Props(
+ classOf[ProtocolStateActor],
+ handshakeInfo,
+ wrappedHandle,
+ associationListener,
+ settings,
+ codec,
+ failureDetector).withDeploy(Deploy.local)
}
-private[transport] class ProtocolStateActor(initialData: InitialProtocolStateData,
- private val localHandshakeInfo: HandshakeInfo,
- private val refuseUid: Option[Int],
- private val settings: AkkaProtocolSettings,
- private val codec: AkkaPduCodec,
- private val failureDetector: FailureDetector)
+private[transport] class ProtocolStateActor(
+ initialData: InitialProtocolStateData,
+ private val localHandshakeInfo: HandshakeInfo,
+ private val refuseUid: Option[Int],
+ private val settings: AkkaProtocolSettings,
+ private val codec: AkkaPduCodec,
+ private val failureDetector: FailureDetector)
extends Actor
with FSM[AssociationState, ProtocolStateData]
with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
@@ -321,35 +336,39 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
import context.dispatcher
// Outbound case
- def this(handshakeInfo: HandshakeInfo,
- remoteAddress: Address,
- statusPromise: Promise[AssociationHandle],
- transport: Transport,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
- failureDetector: FailureDetector,
- refuseUid: Option[Int]) = {
- this(OutboundUnassociated(remoteAddress, statusPromise, transport),
- handshakeInfo,
- refuseUid,
- settings,
- codec,
- failureDetector)
+ def this(
+ handshakeInfo: HandshakeInfo,
+ remoteAddress: Address,
+ statusPromise: Promise[AssociationHandle],
+ transport: Transport,
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
+ failureDetector: FailureDetector,
+ refuseUid: Option[Int]) = {
+ this(
+ OutboundUnassociated(remoteAddress, statusPromise, transport),
+ handshakeInfo,
+ refuseUid,
+ settings,
+ codec,
+ failureDetector)
}
// Inbound case
- def this(handshakeInfo: HandshakeInfo,
- wrappedHandle: AssociationHandle,
- associationListener: AssociationEventListener,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
- failureDetector: FailureDetector) = {
- this(InboundUnassociated(associationListener, wrappedHandle),
- handshakeInfo,
- refuseUid = None,
- settings,
- codec,
- failureDetector)
+ def this(
+ handshakeInfo: HandshakeInfo,
+ wrappedHandle: AssociationHandle,
+ associationListener: AssociationEventListener,
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
+ failureDetector: FailureDetector) = {
+ this(
+ InboundUnassociated(associationListener, wrappedHandle),
+ handshakeInfo,
+ refuseUid = None,
+ settings,
+ codec,
+ failureDetector)
}
val localAddress = localHandshakeInfo.origin
@@ -384,10 +403,11 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
} else {
// Underlying transport was busy -- Associate could not be sent
- setTimer("associate-retry",
- Handle(wrappedHandle),
- RARP(context.system).provider.remoteSettings.BackoffPeriod,
- repeat = false)
+ setTimer(
+ "associate-retry",
+ Handle(wrappedHandle),
+ RARP(context.system).provider.remoteSettings.BackoffPeriod,
+ repeat = false)
stay()
}
@@ -419,9 +439,10 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
failureDetector.heartbeat()
cancelTimer(handshakeTimerKey)
goto(Open).using(
- AssociatedWaitHandler(notifyOutboundHandler(wrappedHandle, handshakeInfo, statusPromise),
- wrappedHandle,
- immutable.Queue.empty))
+ AssociatedWaitHandler(
+ notifyOutboundHandler(wrappedHandle, handshakeInfo, statusPromise),
+ wrappedHandle,
+ immutable.Queue.empty))
case Disassociate(info) =>
// After receiving Disassociate we MUST NOT send back a Disassociate (loop)
@@ -455,19 +476,22 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
initHeartbeatTimer()
cancelTimer(handshakeTimerKey)
goto(Open).using(
- AssociatedWaitHandler(notifyInboundHandler(wrappedHandle, info, associationHandler),
- wrappedHandle,
- immutable.Queue.empty))
+ AssociatedWaitHandler(
+ notifyInboundHandler(wrappedHandle, info, associationHandler),
+ wrappedHandle,
+ immutable.Queue.empty))
} else {
if (log.isDebugEnabled)
- log.warning(s"Association attempt with mismatching cookie from [{}]. Expected [{}] but received [{}].",
- info.origin,
- localHandshakeInfo.cookie.getOrElse(""),
- info.cookie.getOrElse(""))
+ log.warning(
+ "Association attempt with mismatching cookie from [{}]. Expected [{}] but received [{}].",
+ info.origin,
+ localHandshakeInfo.cookie.getOrElse(""),
+ info.cookie.getOrElse(""))
else
- markerLog.warning(LogMarker.Security,
- s"Association attempt with mismatching cookie from [{}].",
- info.origin)
+ markerLog.warning(
+ LogMarker.Security,
+ "Association attempt with mismatching cookie from [{}].",
+ info.origin)
stop()
}
@@ -485,9 +509,10 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
case Event(HandshakeTimer, OutboundUnderlyingAssociated(_, wrappedHandle)) =>
if (log.isDebugEnabled)
- log.debug("Sending disassociate to [{}] because handshake timed out for outbound association after [{}] ms.",
- wrappedHandle,
- settings.HandshakeTimeout.toMillis)
+ log.debug(
+ "Sending disassociate to [{}] because handshake timed out for outbound association after [{}] ms.",
+ wrappedHandle,
+ settings.HandshakeTimeout.toMillis)
sendDisassociate(wrappedHandle, Unknown)
stop(
@@ -496,9 +521,10 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
case Event(HandshakeTimer, InboundUnassociated(_, wrappedHandle)) =>
if (log.isDebugEnabled)
- log.debug("Sending disassociate to [{}] because handshake timed out for inbound association after [{}] ms.",
- wrappedHandle,
- settings.HandshakeTimeout.toMillis)
+ log.debug(
+ "Sending disassociate to [{}] because handshake timed out for inbound association after [{}] ms.",
+ wrappedHandle,
+ settings.HandshakeTimeout.toMillis)
sendDisassociate(wrappedHandle, Unknown)
stop(
@@ -576,9 +602,10 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
stay()
} else {
if (log.isDebugEnabled)
- log.debug("Sending disassociate to [{}] because failure detector triggered in state [{}]",
- wrappedHandle,
- stateName)
+ log.debug(
+ "Sending disassociate to [{}] because failure detector triggered in state [{}]",
+ wrappedHandle,
+ stateName)
// send disassociate just to be sure
sendDisassociate(wrappedHandle, Unknown)
@@ -663,38 +690,42 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
private def listenForListenerRegistration(readHandlerPromise: Promise[HandleEventListener]): Unit =
readHandlerPromise.future.map { HandleListenerRegistered(_) }.pipeTo(self)
- private def notifyOutboundHandler(wrappedHandle: AssociationHandle,
- handshakeInfo: HandshakeInfo,
- statusPromise: Promise[AssociationHandle]): Future[HandleEventListener] = {
+ private def notifyOutboundHandler(
+ wrappedHandle: AssociationHandle,
+ handshakeInfo: HandshakeInfo,
+ statusPromise: Promise[AssociationHandle]): Future[HandleEventListener] = {
val readHandlerPromise = Promise[HandleEventListener]()
listenForListenerRegistration(readHandlerPromise)
statusPromise.success(
- new AkkaProtocolHandle(localAddress,
- wrappedHandle.remoteAddress,
- readHandlerPromise,
- wrappedHandle,
- handshakeInfo,
- self,
- codec))
+ new AkkaProtocolHandle(
+ localAddress,
+ wrappedHandle.remoteAddress,
+ readHandlerPromise,
+ wrappedHandle,
+ handshakeInfo,
+ self,
+ codec))
readHandlerPromise.future
}
- private def notifyInboundHandler(wrappedHandle: AssociationHandle,
- handshakeInfo: HandshakeInfo,
- associationListener: AssociationEventListener): Future[HandleEventListener] = {
+ private def notifyInboundHandler(
+ wrappedHandle: AssociationHandle,
+ handshakeInfo: HandshakeInfo,
+ associationListener: AssociationEventListener): Future[HandleEventListener] = {
val readHandlerPromise = Promise[HandleEventListener]()
listenForListenerRegistration(readHandlerPromise)
associationListener.notify(
InboundAssociation(
- new AkkaProtocolHandle(localAddress,
- handshakeInfo.origin,
- readHandlerPromise,
- wrappedHandle,
- handshakeInfo,
- self,
- codec)))
+ new AkkaProtocolHandle(
+ localAddress,
+ handshakeInfo.origin,
+ readHandlerPromise,
+ wrappedHandle,
+ handshakeInfo,
+ self,
+ codec)))
readHandlerPromise.future
}
diff --git a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
index 1ee57e5f54..32bf87081d 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
@@ -55,8 +55,9 @@ private[remote] object FailureInjectorTransportAdapter {
/**
* INTERNAL API
*/
-private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transport,
- val extendedSystem: ExtendedActorSystem)
+private[remote] class FailureInjectorTransportAdapter(
+ wrappedTransport: Transport,
+ val extendedSystem: ExtendedActorSystem)
extends AbstractTransportAdapter(wrappedTransport)(extendedSystem.dispatcher)
with AssociationEventListener {
@@ -82,8 +83,9 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor
case _ => wrappedTransport.managementCommand(cmd)
}
- protected def interceptListen(listenAddress: Address,
- listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = {
+ protected def interceptListen(
+ listenAddress: Address,
+ listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = {
log.warning("FailureInjectorTransport is active on this system. Gremlins might munch your packets.")
listenerFuture.foreach {
// Side effecting: As this class is not an actor, the only way to safely modify state is through volatile vars.
@@ -97,9 +99,10 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor
protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit = {
// Association is simulated to be failed if there was either an inbound or outbound message drop
- if (shouldDropInbound(remoteAddress, Unit, "interceptAssociate") || shouldDropOutbound(remoteAddress,
- Unit,
- "interceptAssociate"))
+ if (shouldDropInbound(remoteAddress, Unit, "interceptAssociate") || shouldDropOutbound(
+ remoteAddress,
+ Unit,
+ "interceptAssociate"))
statusPromise.failure(new FailureInjectorException("Simulated failure of association to " + remoteAddress))
else
statusPromise.completeWith(wrappedTransport.associate(remoteAddress).map { handle =>
@@ -153,8 +156,9 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor
/**
* INTERNAL API
*/
-private[remote] final case class FailureInjectorHandle(_wrappedHandle: AssociationHandle,
- private val gremlinAdapter: FailureInjectorTransportAdapter)
+private[remote] final case class FailureInjectorHandle(
+ _wrappedHandle: AssociationHandle,
+ private val gremlinAdapter: FailureInjectorTransportAdapter)
extends AbstractTransportAdapterHandle(_wrappedHandle, FailureInjectorSchemeIdentifier)
with HandleEventListener {
import gremlinAdapter.extendedSystem.dispatcher
diff --git a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
index 66f9ee04e7..0fa0c2562b 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
@@ -25,17 +25,19 @@ import scala.concurrent.ExecutionContext.Implicits.global
* requested to do. This class is not optimized for performance and MUST not be used as an in-memory transport in
* production systems.
*/
-class TestTransport(val localAddress: Address,
- final val registry: AssociationRegistry,
- val maximumPayloadBytes: Int = 32000,
- val schemeIdentifier: String = "test")
+class TestTransport(
+ val localAddress: Address,
+ final val registry: AssociationRegistry,
+ val maximumPayloadBytes: Int = 32000,
+ val schemeIdentifier: String = "test")
extends Transport {
def this(system: ExtendedActorSystem, conf: Config) = {
- this(AddressFromURIString(conf.getString("local-address")),
- AssociationRegistry.get(conf.getString("registry-key")),
- conf.getBytes("maximum-payload-bytes").toInt,
- conf.getString("scheme-identifier"))
+ this(
+ AddressFromURIString(conf.getString("local-address")),
+ AssociationRegistry.get(conf.getString("registry-key")),
+ conf.getBytes("maximum-payload-bytes").toInt,
+ conf.getString("scheme-identifier"))
}
override def isResponsibleFor(address: Address): Boolean = true
@@ -81,8 +83,9 @@ class TestTransport(val localAddress: Address,
}
}
- private def createHandlePair(remoteTransport: TestTransport,
- remoteAddress: Address): (TestAssociationHandle, TestAssociationHandle) = {
+ private def createHandlePair(
+ remoteTransport: TestTransport,
+ remoteAddress: Address): (TestAssociationHandle, TestAssociationHandle) = {
val localHandle = new TestAssociationHandle(localAddress, remoteAddress, this, inbound = false)
val remoteHandle = new TestAssociationHandle(remoteAddress, localAddress, remoteTransport, inbound = true)
@@ -296,8 +299,9 @@ object TestTransport {
* @param listenerPair pair of listeners in initiator, receiver order.
* @return
*/
- def remoteListenerRelativeTo(handle: TestAssociationHandle,
- listenerPair: (HandleEventListener, HandleEventListener)): HandleEventListener = {
+ def remoteListenerRelativeTo(
+ handle: TestAssociationHandle,
+ listenerPair: (HandleEventListener, HandleEventListener)): HandleEventListener = {
listenerPair match {
case (initiator, receiver) => if (handle.inbound) initiator else receiver
}
@@ -341,8 +345,9 @@ object TestTransport {
* @param associationEventListenerFuture
* The future that will be completed with the listener that will handle the events for the given transport.
*/
- def registerTransport(transport: TestTransport,
- associationEventListenerFuture: Future[AssociationEventListener]): Unit = {
+ def registerTransport(
+ transport: TestTransport,
+ associationEventListenerFuture: Future[AssociationEventListener]): Unit = {
transportTable.put(transport.localAddress, (transport, associationEventListenerFuture))
}
@@ -445,10 +450,11 @@ object AssociationRegistry {
def clear(): Unit = this.synchronized { registries.clear() }
}
-final case class TestAssociationHandle(localAddress: Address,
- remoteAddress: Address,
- transport: TestTransport,
- inbound: Boolean)
+final case class TestAssociationHandle(
+ localAddress: Address,
+ remoteAddress: Address,
+ transport: TestTransport,
+ inbound: Boolean)
extends AssociationHandle {
@volatile var writable = true
diff --git a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
index 1d9d01e427..c253627164 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
@@ -115,9 +115,11 @@ object ThrottlerTransportAdapter {
override def tryConsumeTokens(nanoTimeOfSend: Long, tokens: Int): (ThrottleMode, Boolean) = {
if (isAvailable(nanoTimeOfSend, tokens))
- (this.copy(nanoTimeOfLastSend = nanoTimeOfSend,
- availableTokens = min(availableTokens - tokens + tokensGenerated(nanoTimeOfSend), capacity)),
- true)
+ (
+ this.copy(
+ nanoTimeOfLastSend = nanoTimeOfSend,
+ availableTokens = min(availableTokens - tokens + tokensGenerated(nanoTimeOfSend), capacity)),
+ true)
else (this, false)
}
@@ -328,9 +330,10 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport)
}
}
- private def setMode(handle: ThrottlerHandle,
- mode: ThrottleMode,
- direction: Direction): Future[SetThrottleAck.type] = {
+ private def setMode(
+ handle: ThrottlerHandle,
+ mode: ThrottleMode,
+ direction: Direction): Future[SetThrottleAck.type] = {
if (direction.includes(Direction.Send))
handle.outboundThrottleMode.set(mode)
if (direction.includes(Direction.Receive))
@@ -354,17 +357,18 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport)
}
}
- private def wrapHandle(originalHandle: AssociationHandle,
- listener: AssociationEventListener,
- inbound: Boolean): ThrottlerHandle = {
+ private def wrapHandle(
+ originalHandle: AssociationHandle,
+ listener: AssociationEventListener,
+ inbound: Boolean): ThrottlerHandle = {
val managerRef = self
- ThrottlerHandle(originalHandle,
- context.actorOf(
- RARP(context.system)
- .configureDispatcher(
- Props(classOf[ThrottledAssociation], managerRef, listener, originalHandle, inbound))
- .withDeploy(Deploy.local),
- "throttler" + nextId()))
+ ThrottlerHandle(
+ originalHandle,
+ context.actorOf(
+ RARP(context.system)
+ .configureDispatcher(Props(classOf[ThrottledAssociation], managerRef, listener, originalHandle, inbound))
+ .withDeploy(Deploy.local),
+ "throttler" + nextId()))
}
}
@@ -408,10 +412,11 @@ private[transport] object ThrottledAssociation {
/**
* INTERNAL API
*/
-private[transport] class ThrottledAssociation(val manager: ActorRef,
- val associationHandler: AssociationEventListener,
- val originalHandle: AssociationHandle,
- val inbound: Boolean)
+private[transport] class ThrottledAssociation(
+ val manager: ActorRef,
+ val associationHandler: AssociationEventListener,
+ val originalHandle: AssociationHandle,
+ val inbound: Boolean)
extends Actor
with LoggingFSM[ThrottledAssociation.ThrottlerState, ThrottledAssociation.ThrottlerData]
with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
diff --git a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala
index 6b69a1c07d..72b30bc0be 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala
@@ -267,8 +267,9 @@ trait AssociationHandle {
* could be called arbitrarily many times.
*
*/
- @deprecated(message = "Use method that states reasons to make sure disassociation reasons are logged.",
- since = "2.5.3")
+ @deprecated(
+ message = "Use method that states reasons to make sure disassociation reasons are logged.",
+ since = "2.5.3")
def disassociate(): Unit
/**
@@ -279,10 +280,11 @@ trait AssociationHandle {
*/
def disassociate(reason: String, log: LoggingAdapter): Unit = {
if (log.isDebugEnabled)
- log.debug("Association between local [{}] and remote [{}] was disassociated because {}",
- localAddress,
- remoteAddress,
- reason)
+ log.debug(
+ "Association between local [{}] and remote [{}] was disassociated because {}",
+ localAddress,
+ remoteAddress,
+ reason)
disassociate()
}
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
index 3004780358..7b5e46016b 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
@@ -193,9 +193,10 @@ class NettyTransportSettings(config: Config) {
val ClientSocketWorkerPoolSize: Int = computeWPS(config.getConfig("client-socket-worker-pool"))
private def computeWPS(config: Config): Int =
- ThreadPoolConfig.scaledPoolSize(config.getInt("pool-size-min"),
- config.getDouble("pool-size-factor"),
- config.getInt("pool-size-max"))
+ ThreadPoolConfig.scaledPoolSize(
+ config.getInt("pool-size-min"),
+ config.getDouble("pool-size-factor"),
+ config.getInt("pool-size-max"))
// Check Netty version >= 3.10.6
{
@@ -230,21 +231,24 @@ private[netty] trait CommonHandlers extends NettyHelpers {
protected def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle
- protected def registerListener(channel: Channel,
- listener: HandleEventListener,
- msg: ChannelBuffer,
- remoteSocketAddress: InetSocketAddress): Unit
+ protected def registerListener(
+ channel: Channel,
+ listener: HandleEventListener,
+ msg: ChannelBuffer,
+ remoteSocketAddress: InetSocketAddress): Unit
- final protected def init(channel: Channel,
- remoteSocketAddress: SocketAddress,
- remoteAddress: Address,
- msg: ChannelBuffer)(op: (AssociationHandle => Any)): Unit = {
+ final protected def init(
+ channel: Channel,
+ remoteSocketAddress: SocketAddress,
+ remoteAddress: Address,
+ msg: ChannelBuffer)(op: (AssociationHandle => Any)): Unit = {
import transport._
- NettyTransport.addressFromSocketAddress(channel.getLocalAddress,
- schemeIdentifier,
- system.name,
- Some(settings.Hostname),
- None) match {
+ NettyTransport.addressFromSocketAddress(
+ channel.getLocalAddress,
+ schemeIdentifier,
+ system.name,
+ Some(settings.Hostname),
+ None) match {
case Some(localAddress) =>
val handle = createHandle(channel, localAddress, remoteAddress)
handle.readHandlerPromise.future.foreach { listener =>
@@ -273,11 +277,12 @@ private[netty] abstract class ServerHandler(
channel.setReadable(false)
associationListenerFuture.foreach { listener =>
val remoteAddress = NettyTransport
- .addressFromSocketAddress(remoteSocketAddress,
- transport.schemeIdentifier,
- transport.system.name,
- hostName = None,
- port = None)
+ .addressFromSocketAddress(
+ remoteSocketAddress,
+ transport.schemeIdentifier,
+ transport.system.name,
+ hostName = None,
+ port = None)
.getOrElse(throw new NettyTransportException(
s"Unknown inbound remote address type [${remoteSocketAddress.getClass.getName}]"))
init(channel, remoteSocketAddress, remoteAddress, msg) { a =>
@@ -319,21 +324,23 @@ private[transport] object NettyTransport {
val uniqueIdCounter = new AtomicInteger(0)
- def addressFromSocketAddress(addr: SocketAddress,
- schemeIdentifier: String,
- systemName: String,
- hostName: Option[String],
- port: Option[Int]): Option[Address] = addr match {
+ def addressFromSocketAddress(
+ addr: SocketAddress,
+ schemeIdentifier: String,
+ systemName: String,
+ hostName: Option[String],
+ port: Option[Int]): Option[Address] = addr match {
case sa: InetSocketAddress =>
Some(Address(schemeIdentifier, systemName, hostName.getOrElse(sa.getHostString), port.getOrElse(sa.getPort)))
case _ => None
}
// Need to do like this for binary compatibility reasons
- def addressFromSocketAddress(addr: SocketAddress,
- schemeIdentifier: String,
- systemName: String,
- hostName: Option[String]): Option[Address] =
+ def addressFromSocketAddress(
+ addr: SocketAddress,
+ schemeIdentifier: String,
+ systemName: String,
+ hostName: Option[String]): Option[Address] =
addressFromSocketAddress(addr, schemeIdentifier, systemName, hostName, port = None)
}
@@ -388,10 +395,11 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA
val boss, worker = createExecutorService()
// We need to create a HashedWheelTimer here since Netty creates one with a thread that
// doesn't respect the akka.daemonic setting
- new NioClientSocketChannelFactory(boss,
- 1,
- new NioWorkerPool(worker, ClientSocketWorkerPoolSize),
- new HashedWheelTimer(system.threadFactory))
+ new NioClientSocketChannelFactory(
+ boss,
+ 1,
+ new NioWorkerPool(worker, ClientSocketWorkerPoolSize),
+ new HashedWheelTimer(system.threadFactory))
case Udp =>
// This does not create a HashedWheelTimer internally
new NioDatagramChannelFactory(createExecutorService(), ClientSocketWorkerPoolSize)
@@ -411,13 +419,15 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA
val pipeline = new DefaultChannelPipeline
if (!isDatagram) {
- pipeline.addLast("FrameDecoder",
- new LengthFieldBasedFrameDecoder(maximumPayloadBytes,
- 0,
- FrameLengthFieldLength,
- 0,
- FrameLengthFieldLength, // Strip the header
- true))
+ pipeline.addLast(
+ "FrameDecoder",
+ new LengthFieldBasedFrameDecoder(
+ maximumPayloadBytes,
+ 0,
+ FrameLengthFieldLength,
+ 0,
+ FrameLengthFieldLength, // Strip the header
+ true))
pipeline.addLast("FrameEncoder", new LengthFieldPrepender(FrameLengthFieldLength))
}
@@ -483,8 +493,9 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA
bootstrap.setOption("child.keepAlive", settings.TcpKeepalive)
bootstrap.setOption("reuseAddress", settings.TcpReuseAddr)
if (isDatagram)
- bootstrap.setOption("receiveBufferSizePredictorFactory",
- new FixedReceiveBufferSizePredictorFactory(ReceiveBufferSize.get))
+ bootstrap.setOption(
+ "receiveBufferSizePredictorFactory",
+ new FixedReceiveBufferSizePredictorFactory(ReceiveBufferSize.get))
settings.ReceiveBufferSize.foreach(sz => bootstrap.setOption("receiveBufferSize", sz))
settings.SendBufferSize.foreach(sz => bootstrap.setOption("sendBufferSize", sz))
settings.WriteBufferHighWaterMark.foreach(sz => bootstrap.setOption("writeBufferHighWaterMark", sz))
@@ -534,11 +545,12 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA
serverChannel = newServerChannel
- addressFromSocketAddress(newServerChannel.getLocalAddress,
- schemeIdentifier,
- system.name,
- Some(settings.Hostname),
- if (settings.PortSelector == 0) None else Some(settings.PortSelector)) match {
+ addressFromSocketAddress(
+ newServerChannel.getLocalAddress,
+ schemeIdentifier,
+ system.name,
+ Some(settings.Hostname),
+ if (settings.PortSelector == 0) None else Some(settings.PortSelector)) match {
case Some(address) =>
addressFromSocketAddress(newServerChannel.getLocalAddress, schemeIdentifier, system.name, None, None) match {
case Some(address) => boundTo = address
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala
index 3f3d30911c..1bf95b0bd0 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala
@@ -45,8 +45,9 @@ import javax.net.ssl.TrustManagerFactory
extends SSLEngineProvider {
def this(system: ActorSystem) =
- this(Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName),
- new SSLSettings(system.settings.config.getConfig("akka.remote.netty.ssl.security")))
+ this(
+ Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName),
+ new SSLSettings(system.settings.config.getConfig("akka.remote.netty.ssl.security")))
import settings._
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
index 369d8132d1..0c38191c70 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
@@ -33,10 +33,11 @@ private[remote] trait TcpHandlers extends CommonHandlers {
import ChannelLocalActor._
- override def registerListener(channel: Channel,
- listener: HandleEventListener,
- msg: ChannelBuffer,
- remoteSocketAddress: InetSocketAddress): Unit =
+ override def registerListener(
+ channel: Channel,
+ listener: HandleEventListener,
+ msg: ChannelBuffer,
+ remoteSocketAddress: InetSocketAddress): Unit =
ChannelLocalActor.set(channel, Some(listener))
override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle =
@@ -62,9 +63,10 @@ private[remote] trait TcpHandlers extends CommonHandlers {
/**
* INTERNAL API
*/
-private[remote] class TcpServerHandler(_transport: NettyTransport,
- _associationListenerFuture: Future[AssociationEventListener],
- val log: LoggingAdapter)
+private[remote] class TcpServerHandler(
+ _transport: NettyTransport,
+ _associationListenerFuture: Future[AssociationEventListener],
+ val log: LoggingAdapter)
extends ServerHandler(_transport, _associationListenerFuture)
with TcpHandlers {
@@ -88,10 +90,11 @@ private[remote] class TcpClientHandler(_transport: NettyTransport, remoteAddress
/**
* INTERNAL API
*/
-private[remote] class TcpAssociationHandle(val localAddress: Address,
- val remoteAddress: Address,
- val transport: NettyTransport,
- private val channel: Channel)
+private[remote] class TcpAssociationHandle(
+ val localAddress: Address,
+ val remoteAddress: Address,
+ val transport: NettyTransport,
+ private val channel: Channel)
extends AssociationHandle {
import transport.executionContext
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
index d689e7ecd2..a98ef0bf49 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
@@ -23,10 +23,11 @@ private[remote] trait UdpHandlers extends CommonHandlers {
override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle =
new UdpAssociationHandle(localAddress, remoteAddress, channel, transport)
- override def registerListener(channel: Channel,
- listener: HandleEventListener,
- msg: ChannelBuffer,
- remoteSocketAddress: InetSocketAddress): Unit = {
+ override def registerListener(
+ channel: Channel,
+ listener: HandleEventListener,
+ msg: ChannelBuffer,
+ remoteSocketAddress: InetSocketAddress): Unit = {
transport.udpConnectionTable.putIfAbsent(remoteSocketAddress, listener) match {
case null => listener.notify(InboundPayload(ByteString(msg.array())))
case oldReader =>
@@ -55,8 +56,9 @@ private[remote] trait UdpHandlers extends CommonHandlers {
* INTERNAL API
*/
@deprecated("Deprecated in favour of Artery (the new Aeron/UDP based remoting implementation).", since = "2.5.0")
-private[remote] class UdpServerHandler(_transport: NettyTransport,
- _associationListenerFuture: Future[AssociationEventListener])
+private[remote] class UdpServerHandler(
+ _transport: NettyTransport,
+ _associationListenerFuture: Future[AssociationEventListener])
extends ServerHandler(_transport, _associationListenerFuture)
with UdpHandlers {
@@ -85,10 +87,11 @@ private[remote] class UdpClientHandler(_transport: NettyTransport, remoteAddress
/**
* INTERNAL API
*/
-private[remote] class UdpAssociationHandle(val localAddress: Address,
- val remoteAddress: Address,
- private val channel: Channel,
- private val transport: NettyTransport)
+private[remote] class UdpAssociationHandle(
+ val localAddress: Address,
+ val remoteAddress: Address,
+ private val channel: Channel,
+ private val transport: NettyTransport)
extends AssociationHandle {
override val readHandlerPromise: Promise[HandleEventListener] = Promise()
diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
index 5d99951317..46d6fa5403 100644
--- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
@@ -23,17 +23,19 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {
}
}
- def createFailureDetector(threshold: Double = 8.0,
- maxSampleSize: Int = 1000,
- minStdDeviation: FiniteDuration = 100.millis,
- acceptableLostDuration: FiniteDuration = Duration.Zero,
- firstHeartbeatEstimate: FiniteDuration = 1.second,
- clock: Clock = FailureDetector.defaultClock) =
- new PhiAccrualFailureDetector(threshold,
- maxSampleSize,
- minStdDeviation,
- acceptableLostDuration,
- firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock)
+ def createFailureDetector(
+ threshold: Double = 8.0,
+ maxSampleSize: Int = 1000,
+ minStdDeviation: FiniteDuration = 100.millis,
+ acceptableLostDuration: FiniteDuration = Duration.Zero,
+ firstHeartbeatEstimate: FiniteDuration = 1.second,
+ clock: Clock = FailureDetector.defaultClock) =
+ new PhiAccrualFailureDetector(
+ threshold,
+ maxSampleSize,
+ minStdDeviation,
+ acceptableLostDuration,
+ firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock)
def cdf(phi: Double) = 1.0 - math.pow(10, -phi)
@@ -68,9 +70,8 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {
}
// larger stdDeviation results => lower phi
- fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 500.0) should be < (fd.phi(timeDiff = 1100,
- mean = 1000.0,
- stdDeviation = 100.0))
+ fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 500.0) should be < (fd
+ .phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 100.0))
}
"return phi value of 0.0 on startup for each address, when no heartbeats" in {
@@ -128,9 +129,10 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {
// 1000 regular intervals, 5 minute pause, and then a short pause again that should trigger unreachable again
val regularIntervals = 0L +: Vector.fill(999)(1000L)
val timeIntervals = regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L
- val fd = createFailureDetector(threshold = 8,
- acceptableLostDuration = 3.seconds,
- clock = fakeTimeGenerator(timeIntervals))
+ val fd = createFailureDetector(
+ threshold = 8,
+ acceptableLostDuration = 3.seconds,
+ clock = fakeTimeGenerator(timeIntervals))
for (_ <- 0 until 1000) fd.heartbeat()
fd.isAvailable should ===(false) // after the long pause
diff --git a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala
index b99cb4cb99..d21fbad6b2 100644
--- a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala
@@ -19,9 +19,9 @@ class DaemonicSpec extends AkkaSpec {
// get all threads running before actor system is started
val origThreads: Set[Thread] = Thread.getAllStackTraces.keySet().asScala.to(Set)
// create a separate actor system that we can check the threads for
- val daemonicSystem = ActorSystem("daemonic",
- ConfigFactory.parseString(
- """
+ val daemonicSystem = ActorSystem(
+ "daemonic",
+ ConfigFactory.parseString("""
akka.daemonic = on
akka.actor.provider = remote
akka.remote.netty.tcp.transport-class = "akka.remote.transport.netty.NettyTransport"
diff --git a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
index e14eb5d6ba..551fe63a25 100644
--- a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
@@ -20,32 +20,36 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") {
}
}
- def createFailureDetector(threshold: Double = 8.0,
- maxSampleSize: Int = 1000,
- minStdDeviation: FiniteDuration = 10.millis,
- acceptableLostDuration: FiniteDuration = Duration.Zero,
- firstHeartbeatEstimate: FiniteDuration = 1.second,
- clock: Clock = FailureDetector.defaultClock) =
- new PhiAccrualFailureDetector(threshold,
- maxSampleSize,
- minStdDeviation,
- acceptableLostDuration,
- firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock)
+ def createFailureDetector(
+ threshold: Double = 8.0,
+ maxSampleSize: Int = 1000,
+ minStdDeviation: FiniteDuration = 10.millis,
+ acceptableLostDuration: FiniteDuration = Duration.Zero,
+ firstHeartbeatEstimate: FiniteDuration = 1.second,
+ clock: Clock = FailureDetector.defaultClock) =
+ new PhiAccrualFailureDetector(
+ threshold,
+ maxSampleSize,
+ minStdDeviation,
+ acceptableLostDuration,
+ firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock)
- def createFailureDetectorRegistry(threshold: Double = 8.0,
- maxSampleSize: Int = 1000,
- minStdDeviation: FiniteDuration = 10.millis,
- acceptableLostDuration: FiniteDuration = Duration.Zero,
- firstHeartbeatEstimate: FiniteDuration = 1.second,
- clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = {
+ def createFailureDetectorRegistry(
+ threshold: Double = 8.0,
+ maxSampleSize: Int = 1000,
+ minStdDeviation: FiniteDuration = 10.millis,
+ acceptableLostDuration: FiniteDuration = Duration.Zero,
+ firstHeartbeatEstimate: FiniteDuration = 1.second,
+ clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = {
new DefaultFailureDetectorRegistry[String](
() =>
- createFailureDetector(threshold,
- maxSampleSize,
- minStdDeviation,
- acceptableLostDuration,
- firstHeartbeatEstimate,
- clock))
+ createFailureDetector(
+ threshold,
+ maxSampleSize,
+ minStdDeviation,
+ acceptableLostDuration,
+ firstHeartbeatEstimate,
+ clock))
}
"mark node as available after a series of successful heartbeats" in {
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
index d841d72708..fc06584e79 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
@@ -46,8 +46,9 @@ class RemoteConfigSpec extends AkkaSpec("""
Transports.head._1 should ===(classOf[akka.remote.transport.netty.NettyTransport].getName)
Transports.head._2 should ===(Nil)
Adapters should ===(
- Map("gremlin" -> classOf[akka.remote.transport.FailureInjectorProvider].getName,
- "trttl" -> classOf[akka.remote.transport.ThrottlerProvider].getName))
+ Map(
+ "gremlin" -> classOf[akka.remote.transport.FailureInjectorProvider].getName,
+ "trttl" -> classOf[akka.remote.transport.ThrottlerProvider].getName))
WatchFailureDetectorImplementationClass should ===(classOf[PhiAccrualFailureDetector].getName)
WatchHeartBeatInterval should ===(1 seconds)
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala
index 67a24028e0..4542d7e389 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala
@@ -98,12 +98,13 @@ akka {
// This forces ReliableDeliverySupervisor to start with unknown remote system UID.
val extinctPath = RootActorPath(Address(protocol, "extinct-system", "localhost", SocketUtil.temporaryLocalPort())) / "user" / "noone"
val transport = RARP(system).provider.transport
- val extinctRef = new RemoteActorRef(transport,
- transport.localAddressForRemote(extinctPath.address),
- extinctPath,
- Nobody,
- props = None,
- deploy = None)
+ val extinctRef = new RemoteActorRef(
+ transport,
+ transport.localAddressForRemote(extinctPath.address),
+ extinctPath,
+ Nobody,
+ props = None,
+ deploy = None)
val probe = TestProbe()
probe.watch(extinctRef)
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala
index f35a7d862d..f7bb6c2eda 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala
@@ -11,7 +11,8 @@ import com.typesafe.config._
import akka.ConfigurationException
object RemoteDeployerSpec {
- val deployerConf = ConfigFactory.parseString("""
+ val deployerConf = ConfigFactory.parseString(
+ """
akka.actor.provider = remote
akka.actor.deployment {
/service2 {
@@ -23,7 +24,7 @@ object RemoteDeployerSpec {
}
akka.remote.netty.tcp.port = 0
""",
- ConfigParseOptions.defaults)
+ ConfigParseOptions.defaults)
class RecipeActor extends Actor {
def receive = { case _ => }
@@ -41,11 +42,12 @@ class RemoteDeployerSpec extends AkkaSpec(RemoteDeployerSpec.deployerConf) {
deployment should ===(
Some(
- Deploy(service,
- deployment.get.config,
- RoundRobinPool(3),
- RemoteScope(Address("akka", "sys", "wallace", 2552)),
- "mydispatcher")))
+ Deploy(
+ service,
+ deployment.get.config,
+ RoundRobinPool(3),
+ RemoteScope(Address("akka", "sys", "wallace", 2552)),
+ "mydispatcher")))
}
"reject remote deployment when the source requires LocalScope" in {
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala
index 1eabc54637..9b0dbd5ab4 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala
@@ -83,9 +83,10 @@ object RemoteDeploymentWhitelistSpec {
def muteSystem(system: ActorSystem): Unit = {
system.eventStream.publish(
- TestEvent.Mute(EventFilter.error(start = "AssociationError"),
- EventFilter.warning(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead letter.*")))
+ TestEvent.Mute(
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*")))
}
}
@@ -119,10 +120,10 @@ class RemoteDeploymentWhitelistSpec
override def atStartup() = {
muteSystem(system)
remoteSystem.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointException](),
- EventFilter.error(start = "AssociationError"),
- EventFilter.warning(
- pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)")))
+ TestEvent.Mute(
+ EventFilter[EndpointException](),
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)")))
}
override def afterTermination(): Unit = {
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala
index 0562b578e3..bba3d6b005 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala
@@ -116,10 +116,10 @@ class RemoteRouterSpec extends AkkaSpec(s"""
"deploy its children on remote host driven by programatic definition" in {
val probe = TestProbe()(masterSystem)
- val router = masterSystem.actorOf(new RemoteRouterConfig(
- RoundRobinPool(2),
- Seq(Address(protocol, sysName, "localhost", port))).props(echoActorProps),
- "blub2")
+ val router = masterSystem.actorOf(
+ new RemoteRouterConfig(RoundRobinPool(2), Seq(Address(protocol, sysName, "localhost", port)))
+ .props(echoActorProps),
+ "blub2")
val replies = collectRouteePaths(probe, router, 5)
val children = replies.toSet
children should have size 2
@@ -231,8 +231,9 @@ class RemoteRouterSpec extends AkkaSpec(s"""
case e => probe.ref ! e; SupervisorStrategy.Escalate
}
val router = masterSystem.actorOf(
- new RemoteRouterConfig(RoundRobinPool(1, supervisorStrategy = escalator),
- Seq(Address(protocol, sysName, "localhost", port))).props(Props.empty),
+ new RemoteRouterConfig(
+ RoundRobinPool(1, supervisorStrategy = escalator),
+ Seq(Address(protocol, sysName, "localhost", port))).props(Props.empty),
"blub3")
router.tell(GetRoutees, probe.ref)
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala
index 9ef0b68911..5b53509156 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala
@@ -26,11 +26,12 @@ object RemoteWatcherSpec {
def createFailureDetector(): FailureDetectorRegistry[Address] = {
def createFailureDetector(): FailureDetector =
- new PhiAccrualFailureDetector(threshold = 8.0,
- maxSampleSize = 200,
- minStdDeviation = 100.millis,
- acceptableHeartbeatPause = 3.seconds,
- firstHeartbeatEstimate = 1.second)
+ new PhiAccrualFailureDetector(
+ threshold = 8.0,
+ maxSampleSize = 200,
+ minStdDeviation = 100.millis,
+ acceptableHeartbeatPause = 3.seconds,
+ firstHeartbeatEstimate = 1.second)
new DefaultFailureDetectorRegistry(() => createFailureDetector())
}
@@ -41,10 +42,11 @@ object RemoteWatcherSpec {
}
class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration)
- extends RemoteWatcher(createFailureDetector,
- heartbeatInterval = TurnOff,
- unreachableReaperInterval = TurnOff,
- heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) {
+ extends RemoteWatcher(
+ createFailureDetector,
+ heartbeatInterval = TurnOff,
+ unreachableReaperInterval = TurnOff,
+ heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) {
def this() = this(heartbeatExpectedResponseAfter = TurnOff)
@@ -82,8 +84,9 @@ class RemoteWatcherSpec extends AkkaSpec("""akka {
def remoteAddressUid = AddressUidExtension(remoteSystem).addressUid
Seq(system, remoteSystem).foreach(
- muteDeadLetters(akka.remote.transport.AssociationHandle.Disassociated.getClass,
- akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_))
+ muteDeadLetters(
+ akka.remote.transport.AssociationHandle.Disassociated.getClass,
+ akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_))
override def afterTermination(): Unit = {
shutdown(remoteSystem)
diff --git a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
index a292e50241..e2e20bc8e3 100644
--- a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
@@ -122,9 +122,10 @@ object RemotingSpec {
def muteSystem(system: ActorSystem): Unit = {
system.eventStream.publish(
- TestEvent.Mute(EventFilter.error(start = "AssociationError"),
- EventFilter.warning(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead letter.*")))
+ TestEvent.Mute(
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*")))
}
}
@@ -186,10 +187,10 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D
override def atStartup() = {
muteSystem(system)
remoteSystem.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointException](),
- EventFilter.error(start = "AssociationError"),
- EventFilter.warning(
- pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)")))
+ TestEvent.Mute(
+ EventFilter[EndpointException](),
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)")))
}
private def byteStringOfSize(size: Int) = ByteString.fromArray(Array.fill(size)(42: Byte))
@@ -235,9 +236,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D
.withFallback(remoteSystem.settings.config)
val moreSystems = Vector.fill(5)(ActorSystem(remoteSystem.name, tcpOnlyConfig))
moreSystems.foreach { sys =>
- sys.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointDisassociatedException](),
- EventFilter.warning(pattern = "received dead letter.*")))
+ sys.eventStream.publish(TestEvent
+ .Mute(EventFilter[EndpointDisassociatedException](), EventFilter.warning(pattern = "received dead letter.*")))
sys.actorOf(Props[Echo2], name = "echo")
}
val moreRefs =
@@ -671,12 +671,12 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D
val remoteTransport = new TestTransport(rawRemoteAddress, registry)
val remoteTransportProbe = TestProbe()
- registry.registerTransport(remoteTransport,
- associationEventListenerFuture =
- Future.successful(new Transport.AssociationEventListener {
- override def notify(ev: Transport.AssociationEvent): Unit =
- remoteTransportProbe.ref ! ev
- }))
+ registry.registerTransport(
+ remoteTransport,
+ associationEventListenerFuture = Future.successful(new Transport.AssociationEventListener {
+ override def notify(ev: Transport.AssociationEvent): Unit =
+ remoteTransportProbe.ref ! ev
+ }))
val outboundHandle =
new TestAssociationHandle(rawLocalAddress, rawRemoteAddress, remoteTransport, inbound = false)
@@ -756,12 +756,12 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D
val remoteTransport = new TestTransport(rawRemoteAddress, registry)
val remoteTransportProbe = TestProbe()
- registry.registerTransport(remoteTransport,
- associationEventListenerFuture =
- Future.successful(new Transport.AssociationEventListener {
- override def notify(ev: Transport.AssociationEvent): Unit =
- remoteTransportProbe.ref ! ev
- }))
+ registry.registerTransport(
+ remoteTransport,
+ associationEventListenerFuture = Future.successful(new Transport.AssociationEventListener {
+ override def notify(ev: Transport.AssociationEvent): Unit =
+ remoteTransportProbe.ref ! ev
+ }))
val outboundHandle =
new TestAssociationHandle(rawLocalAddress, rawRemoteAddress, remoteTransport, inbound = false)
diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala
index 02cf97c4c8..ac30283604 100644
--- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala
@@ -55,12 +55,13 @@ object Configuration {
}
"""
- final case class CipherConfig(runTest: Boolean,
- config: Config,
- cipher: String,
- localPort: Int,
- remotePort: Int,
- provider: Option[ConfigSSLEngineProvider])
+ final case class CipherConfig(
+ runTest: Boolean,
+ config: Config,
+ cipher: String,
+ localPort: Int,
+ remotePort: Int,
+ provider: Option[ConfigSSLEngineProvider])
def getCipherConfig(cipher: String, enabled: String*): CipherConfig = {
val localPort, remotePort = {
@@ -121,10 +122,11 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig)
implicit val timeout: Timeout = Timeout(10.seconds)
- lazy val other: ActorSystem = ActorSystem("remote-sys",
- ConfigFactory
- .parseString("akka.remote.netty.ssl.port = " + cipherConfig.remotePort)
- .withFallback(system.settings.config))
+ lazy val other: ActorSystem = ActorSystem(
+ "remote-sys",
+ ConfigFactory
+ .parseString("akka.remote.netty.ssl.port = " + cipherConfig.remotePort)
+ .withFallback(system.settings.config))
override def afterTermination(): Unit = {
if (cipherConfig.runTest) {
diff --git a/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala b/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala
index fa1e23a3ef..1a7509fe22 100644
--- a/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala
@@ -90,12 +90,13 @@ abstract class AbstractTransientSerializationErrorSpec(config: Config)
expectMsg("ping")
// none of these should tear down the connection
- List(ManifestIllegal,
- ManifestNotSerializable,
- ToBinaryIllegal,
- ToBinaryNotSerializable,
- NotDeserializable,
- IllegalOnDeserialize).foreach(msg => selection.tell(msg, this.testActor))
+ List(
+ ManifestIllegal,
+ ManifestNotSerializable,
+ ToBinaryIllegal,
+ ToBinaryNotSerializable,
+ NotDeserializable,
+ IllegalOnDeserialize).foreach(msg => selection.tell(msg, this.testActor))
// make sure we still have a connection
selection.tell("ping", this.testActor)
diff --git a/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala
index 45322395d1..70786818e5 100644
--- a/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala
@@ -69,8 +69,9 @@ akka.loglevel = DEBUG # test verifies debug
import UntrustedSpec._
- val client = ActorSystem("UntrustedSpec-client",
- ConfigFactory.parseString("""
+ val client = ActorSystem(
+ "UntrustedSpec-client",
+ ConfigFactory.parseString("""
akka.actor.provider = remote
akka.remote.netty.tcp.port = 0
"""))
diff --git a/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala
index 14813edffb..e8d3016763 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala
@@ -42,9 +42,10 @@ abstract class ArteryMultiNodeSpec(config: Config)
* @return A new actor system configured with artery enabled. The system will
* automatically be terminated after test is completed to avoid leaks.
*/
- def newRemoteSystem(extraConfig: Option[String] = None,
- name: Option[String] = None,
- setup: Option[ActorSystemSetup] = None): ActorSystem = {
+ def newRemoteSystem(
+ extraConfig: Option[String] = None,
+ name: Option[String] = None,
+ setup: Option[ActorSystemSetup] = None): ActorSystem = {
val config =
ArterySpecSupport.newFlightRecorderConfig.withFallback(extraConfig.fold(localSystem.settings.config)(str =>
ConfigFactory.parseString(str).withFallback(localSystem.settings.config)))
diff --git a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala
index 77f6de968e..920e095b57 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala
@@ -49,15 +49,16 @@ class DuplicateHandshakeSpec extends AkkaSpec with ImplicitSender {
val env = new ReusableInboundEnvelope
env
- .init(recipient = OptionVal.None,
- sender = OptionVal.None,
- originUid = addressA.uid,
- serializerId,
- manifest,
- flags = 0,
- envelopeBuffer = null,
- association,
- lane = 0)
+ .init(
+ recipient = OptionVal.None,
+ sender = OptionVal.None,
+ originUid = addressA.uid,
+ serializerId,
+ manifest,
+ flags = 0,
+ envelopeBuffer = null,
+ association,
+ lane = 0)
.withMessage(msg)
env
}
diff --git a/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala
index 4f1b74965c..2d2bbb25d5 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala
@@ -16,9 +16,10 @@ class EnvelopeBufferSpec extends AkkaSpec {
import CompressionTestUtils._
object TestCompressor extends InboundCompressions {
- val refToIdx: Map[ActorRef, Int] = Map(minimalRef("compressable0") -> 0,
- minimalRef("compressable1") -> 1,
- minimalRef("reallylongcompressablestring") -> 2)
+ val refToIdx: Map[ActorRef, Int] = Map(
+ minimalRef("compressable0") -> 0,
+ minimalRef("compressable1") -> 1,
+ minimalRef("reallylongcompressablestring") -> 2)
val idxToRef: Map[Int, ActorRef] = refToIdx.map(_.swap)
val serializerToIdx = Map("serializer0" -> 0, "serializer1" -> 1)
diff --git a/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala
index 3a746322e6..e751cb147e 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala
@@ -74,8 +74,9 @@ class InboundHandshakeSpec extends AkkaSpec with ImplicitSender {
upstream.sendNext("msg1")
downstream.expectNext("msg1")
val uniqueRemoteAddress =
- Await.result(inboundContext.association(addressA.address).associationState.uniqueRemoteAddress,
- remainingOrDefault)
+ Await.result(
+ inboundContext.association(addressA.address).associationState.uniqueRemoteAddress,
+ remainingOrDefault)
uniqueRemoteAddress should ===(addressA)
downstream.cancel()
}
diff --git a/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala
index 2ea47e9287..ada1060af1 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala
@@ -73,11 +73,12 @@ class TestInstrument(system: ExtendedActorSystem) extends RemoteInstrument {
case _ =>
}
- override def remoteMessageReceived(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- size: Int,
- time: Long): Unit =
+ override def remoteMessageReceived(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ size: Int,
+ time: Long): Unit =
message match {
case _: MetadataCarryingSpec.Ping | ActorSelectionMessage(_: MetadataCarryingSpec.Ping, _, _) =>
MetadataCarryingSpy(system).ref.foreach(_ ! RemoteMessageReceived(recipient, message, sender, size, time))
diff --git a/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala
index b2bcf20b06..65367fc86d 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala
@@ -30,24 +30,26 @@ class OutboundHandshakeSpec extends AkkaSpec with ImplicitSender {
private val outboundEnvelopePool = ReusableOutboundEnvelope.createObjectPool(capacity = 16)
- private def setupStream(outboundContext: OutboundContext,
- timeout: FiniteDuration = 5.seconds,
- retryInterval: FiniteDuration = 10.seconds,
- injectHandshakeInterval: FiniteDuration = 10.seconds,
- livenessProbeInterval: Duration = Duration.Undefined)
+ private def setupStream(
+ outboundContext: OutboundContext,
+ timeout: FiniteDuration = 5.seconds,
+ retryInterval: FiniteDuration = 10.seconds,
+ injectHandshakeInterval: FiniteDuration = 10.seconds,
+ livenessProbeInterval: Duration = Duration.Undefined)
: (TestPublisher.Probe[String], TestSubscriber.Probe[Any]) = {
TestSource
.probe[String]
.map(msg => outboundEnvelopePool.acquire().init(OptionVal.None, msg, OptionVal.None))
.via(
- new OutboundHandshake(system,
- outboundContext,
- outboundEnvelopePool,
- timeout,
- retryInterval,
- injectHandshakeInterval,
- livenessProbeInterval))
+ new OutboundHandshake(
+ system,
+ outboundContext,
+ outboundEnvelopePool,
+ timeout,
+ retryInterval,
+ injectHandshakeInterval,
+ livenessProbeInterval))
.map(env => env.message)
.toMat(TestSink.probe[Any])(Keep.both)
.run()
diff --git a/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala
index 6b66727f49..94c8b85853 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala
@@ -136,11 +136,12 @@ class OutboundIdleShutdownSpec extends ArteryMultiNodeSpec(s"""
shutdown(remoteSystem, verifySystemShutdown = true)
- val remoteSystem2 = newRemoteSystem(Some(s"""
+ val remoteSystem2 = newRemoteSystem(
+ Some(s"""
akka.remote.artery.canonical.hostname = ${remoteAddress.host.get}
akka.remote.artery.canonical.port = ${remoteAddress.port.get}
"""),
- name = Some(remoteAddress.system))
+ name = Some(remoteAddress.system))
try {
remoteSystem2.actorOf(TestActors.echoActorProps, "echo2")
diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala
index a7c61b4eda..1fd406c6a8 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala
@@ -13,9 +13,10 @@ class RemoteConnectionSpec extends ArteryMultiNodeSpec("akka.remote.retry-gate-c
def muteSystem(system: ActorSystem): Unit = {
system.eventStream.publish(
- TestEvent.Mute(EventFilter.error(start = "AssociationError"),
- EventFilter.warning(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead letter.*")))
+ TestEvent.Mute(
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*")))
}
"Remoting between systems" should {
diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala
index 749fc99516..61cc79e2b2 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala
@@ -39,11 +39,12 @@ class RemoteDeployerSpec extends AkkaSpec(RemoteDeployerSpec.deployerConf) with
deployment should ===(
Some(
- Deploy(service,
- deployment.get.config,
- RoundRobinPool(3),
- RemoteScope(Address("akka", "sys", "wallace", 2552)),
- "mydispatcher")))
+ Deploy(
+ service,
+ deployment.get.config,
+ RoundRobinPool(3),
+ RemoteScope(Address("akka", "sys", "wallace", 2552)),
+ "mydispatcher")))
}
"reject remote deployment when the source requires LocalScope" in {
diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala
index 3830739257..2add7ffc93 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala
@@ -26,9 +26,8 @@ class RemoteFailureSpec extends ArteryMultiNodeSpec with ImplicitSender {
val remoteSystems = Vector.fill(5)(newRemoteSystem())
remoteSystems.foreach { sys =>
- sys.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointDisassociatedException](),
- EventFilter.warning(pattern = "received dead letter.*")))
+ sys.eventStream.publish(TestEvent
+ .Mute(EventFilter[EndpointDisassociatedException](), EventFilter.warning(pattern = "received dead letter.*")))
sys.actorOf(TestActors.echoActorProps, name = "echo")
}
val remoteSelections = remoteSystems.map { sys =>
diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala
index 51b88ef7a5..5279ca120f 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala
@@ -89,8 +89,9 @@ class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG
}
"skip serializing remote instrument that fails" in {
- ensureDebugLog("Skipping serialization of RemoteInstrument 7 since it failed with boom",
- "Skipping local RemoteInstrument 7 that has no matching data in the message") {
+ ensureDebugLog(
+ "Skipping serialization of RemoteInstrument 7 since it failed with boom",
+ "Skipping local RemoteInstrument 7 that has no matching data in the message") {
val p = TestProbe()
val instruments =
Seq(testInstrument(7, "!", sentThrowable = boom), testInstrument(10, ".."), testInstrument(21, "???"))
@@ -102,12 +103,14 @@ class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG
}
"skip deserializing remote instrument that fails" in {
- ensureDebugLog("Skipping deserialization of RemoteInstrument 7 since it failed with boom",
- "Skipping deserialization of RemoteInstrument 21 since it failed with boom") {
+ ensureDebugLog(
+ "Skipping deserialization of RemoteInstrument 7 since it failed with boom",
+ "Skipping deserialization of RemoteInstrument 21 since it failed with boom") {
val p = TestProbe()
- val instruments = Seq(testInstrument(7, "!", receiveThrowable = boom),
- testInstrument(10, ".."),
- testInstrument(21, "???", receiveThrowable = boom))
+ val instruments = Seq(
+ testInstrument(7, "!", receiveThrowable = boom),
+ testInstrument(10, ".."),
+ testInstrument(21, "???", receiveThrowable = boom))
val ri = remoteInstruments(instruments: _*)
serializeDeserialize(ri, ri, p.ref, "waat")
p.expectMsgAllOf("waat-10-..")
@@ -131,10 +134,11 @@ object RemoteInstrumentsSerializationSpec {
override def isDebugEnabled(logClass: Class[_], logSource: String): Boolean = logSource == "DebugSource"
}
- def testInstrument(id: Int,
- metadata: String,
- sentThrowable: Throwable = null,
- receiveThrowable: Throwable = null): RemoteInstrument = {
+ def testInstrument(
+ id: Int,
+ metadata: String,
+ sentThrowable: Throwable = null,
+ receiveThrowable: Throwable = null): RemoteInstrument = {
new RemoteInstrument {
private val charset = Charset.forName("UTF-8")
private val encoder = charset.newEncoder()
@@ -142,10 +146,11 @@ object RemoteInstrumentsSerializationSpec {
override def identifier: Byte = id.toByte
- override def remoteWriteMetadata(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- buffer: ByteBuffer): Unit = {
+ override def remoteWriteMetadata(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ buffer: ByteBuffer): Unit = {
buffer.putInt(metadata.length)
if (sentThrowable ne null) throw sentThrowable
encoder.encode(CharBuffer.wrap(metadata), buffer, true)
@@ -153,10 +158,11 @@ object RemoteInstrumentsSerializationSpec {
encoder.reset()
}
- override def remoteReadMetadata(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- buffer: ByteBuffer): Unit = {
+ override def remoteReadMetadata(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ buffer: ByteBuffer): Unit = {
val size = buffer.getInt
if (receiveThrowable ne null) throw receiveThrowable
val charBuffer = CharBuffer.allocate(size)
@@ -167,17 +173,19 @@ object RemoteInstrumentsSerializationSpec {
recipient ! s"$message-$identifier-$string"
}
- override def remoteMessageSent(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- size: Int,
- time: Long): Unit = ()
+ override def remoteMessageSent(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ size: Int,
+ time: Long): Unit = ()
- override def remoteMessageReceived(recipient: ActorRef,
- message: Object,
- sender: ActorRef,
- size: Int,
- time: Long): Unit = ()
+ override def remoteMessageReceived(
+ recipient: ActorRef,
+ message: Object,
+ sender: ActorRef,
+ size: Int,
+ time: Long): Unit = ()
}
}
@@ -194,10 +202,11 @@ object RemoteInstrumentsSerializationSpec {
ri.deserializeRaw(mockInbound)
}
- def serializeDeserialize(riS: RemoteInstruments,
- riD: RemoteInstruments,
- recipient: ActorRef,
- message: AnyRef): Unit = {
+ def serializeDeserialize(
+ riS: RemoteInstruments,
+ riD: RemoteInstruments,
+ recipient: ActorRef,
+ message: AnyRef): Unit = {
val buffer = ByteBuffer.allocate(1024)
serialize(riS, buffer)
buffer.flip()
diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala
index 2d32aee44a..beca8ed0ca 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala
@@ -110,9 +110,10 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString("""
"deploy its children on remote host driven by programatic definition" in {
val probe = TestProbe()(masterSystem)
val router =
- masterSystem.actorOf(new RemoteRouterConfig(RoundRobinPool(2), Seq(Address("akka", sysName, "localhost", port)))
- .props(echoActorProps),
- "blub2")
+ masterSystem.actorOf(
+ new RemoteRouterConfig(RoundRobinPool(2), Seq(Address("akka", sysName, "localhost", port)))
+ .props(echoActorProps),
+ "blub2")
val replies = collectRouteePaths(probe, router, 5)
val children = replies.toSet
children should have size 2
@@ -224,8 +225,9 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString("""
case e => probe.ref ! e; SupervisorStrategy.Escalate
}
val router = masterSystem.actorOf(
- new RemoteRouterConfig(RoundRobinPool(1, supervisorStrategy = escalator),
- Seq(Address("akka", sysName, "localhost", port))).props(Props.empty),
+ new RemoteRouterConfig(
+ RoundRobinPool(1, supervisorStrategy = escalator),
+ Seq(Address("akka", sysName, "localhost", port))).props(Props.empty),
"blub3")
router.tell(GetRoutees, probe.ref)
diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala
index 5e7f822a7c..792f4acc32 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala
@@ -27,11 +27,12 @@ object RemoteWatcherSpec {
def createFailureDetector(): FailureDetectorRegistry[Address] = {
def createFailureDetector(): FailureDetector =
- new PhiAccrualFailureDetector(threshold = 8.0,
- maxSampleSize = 200,
- minStdDeviation = 100.millis,
- acceptableHeartbeatPause = 3.seconds,
- firstHeartbeatEstimate = 1.second)
+ new PhiAccrualFailureDetector(
+ threshold = 8.0,
+ maxSampleSize = 200,
+ minStdDeviation = 100.millis,
+ acceptableHeartbeatPause = 3.seconds,
+ firstHeartbeatEstimate = 1.second)
new DefaultFailureDetectorRegistry(() => createFailureDetector())
}
@@ -42,10 +43,11 @@ object RemoteWatcherSpec {
}
class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration)
- extends RemoteWatcher(createFailureDetector,
- heartbeatInterval = TurnOff,
- unreachableReaperInterval = TurnOff,
- heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) {
+ extends RemoteWatcher(
+ createFailureDetector,
+ heartbeatInterval = TurnOff,
+ unreachableReaperInterval = TurnOff,
+ heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) {
def this() = this(heartbeatExpectedResponseAfter = TurnOff)
@@ -75,8 +77,9 @@ class RemoteWatcherSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defaultCon
def remoteAddressUid = AddressUidExtension(remoteSystem).longAddressUid
Seq(system, remoteSystem).foreach(
- muteDeadLetters(akka.remote.transport.AssociationHandle.Disassociated.getClass,
- akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_))
+ muteDeadLetters(
+ akka.remote.transport.AssociationHandle.Disassociated.getClass,
+ akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_))
override def afterTermination(): Unit = {
shutdown(remoteSystem)
diff --git a/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala
index d0e05746ad..10b6d0ffeb 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala
@@ -18,8 +18,9 @@ object SerializationErrorSpec {
class SerializationErrorSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defaultConfig) with ImplicitSender {
import SerializationErrorSpec._
- val systemB = newRemoteSystem(name = Some("systemB"),
- extraConfig = Some("""
+ val systemB = newRemoteSystem(
+ name = Some("systemB"),
+ extraConfig = Some("""
akka.actor.serialization-identifiers {
# this will cause deserialization error
"akka.serialization.ByteArraySerializer" = -4
diff --git a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala
index 7ff943c16e..2a83976a63 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala
@@ -61,9 +61,10 @@ class SystemMessageDeliverySpec extends ArteryMultiNodeSpec(SystemMessageDeliver
system.eventStream.publish(TestEvent.Mute(EventFilter.warning(pattern = ".*negative acknowledgement.*")))
systemB.eventStream.publish(TestEvent.Mute(EventFilter.warning(pattern = ".*negative acknowledgement.*")))
- private def send(sendCount: Int,
- resendInterval: FiniteDuration,
- outboundContext: OutboundContext): Source[OutboundEnvelope, NotUsed] = {
+ private def send(
+ sendCount: Int,
+ resendInterval: FiniteDuration,
+ outboundContext: OutboundContext): Source[OutboundEnvelope, NotUsed] = {
val deadLetters = TestProbe().ref
Source(1 to sendCount)
.map(n => outboundEnvelopePool.acquire().init(OptionVal.None, TestSysMsg("msg-" + n), OptionVal.None))
diff --git a/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala b/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala
index 8e3c864c7f..214c948b7f 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala
@@ -20,10 +20,11 @@ import akka.util.OptionVal
import akka.dispatch.ExecutionContexts
import com.typesafe.config.ConfigFactory
-private[remote] class TestInboundContext(override val localAddress: UniqueAddress,
- val controlSubject: TestControlMessageSubject = new TestControlMessageSubject,
- val controlProbe: Option[ActorRef] = None,
- val replyDropRate: Double = 0.0)
+private[remote] class TestInboundContext(
+ override val localAddress: UniqueAddress,
+ val controlSubject: TestControlMessageSubject = new TestControlMessageSubject,
+ val controlProbe: Option[ActorRef] = None,
+ val replyDropRate: Double = 0.0)
extends InboundContext {
private val associationsByAddress = new ConcurrentHashMap[Address, OutboundContext]()
@@ -64,10 +65,11 @@ private[remote] class TestInboundContext(override val localAddress: UniqueAddres
ArterySettings(ConfigFactory.load().getConfig("akka.remote.artery"))
}
-private[remote] class TestOutboundContext(override val localAddress: UniqueAddress,
- override val remoteAddress: Address,
- override val controlSubject: TestControlMessageSubject,
- val controlProbe: Option[ActorRef] = None)
+private[remote] class TestOutboundContext(
+ override val localAddress: UniqueAddress,
+ override val remoteAddress: Address,
+ override val controlSubject: TestControlMessageSubject,
+ val controlProbe: Option[ActorRef] = None)
extends OutboundContext {
// access to this is synchronized (it's a test utility)
@@ -124,9 +126,10 @@ private[remote] class TestControlMessageSubject extends ControlMessageSubject {
}
-private[remote] class ManualReplyInboundContext(replyProbe: ActorRef,
- localAddress: UniqueAddress,
- controlSubject: TestControlMessageSubject)
+private[remote] class ManualReplyInboundContext(
+ replyProbe: ActorRef,
+ localAddress: UniqueAddress,
+ controlSubject: TestControlMessageSubject)
extends TestInboundContext(localAddress, controlSubject) {
private var lastReply: Option[(Address, ControlMessage)] = None
diff --git a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala
index 285bb121d5..b889043a5b 100644
--- a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala
@@ -52,10 +52,10 @@ class CompressionIntegrationSpec
// listen for compression table events
val aManifestProbe = TestProbe()(system)
val bManifestProbe = TestProbe()(systemB)
- system.eventStream.subscribe(aManifestProbe.ref,
- classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
- systemB.eventStream.subscribe(bManifestProbe.ref,
- classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
+ system.eventStream
+ .subscribe(aManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
+ systemB.eventStream
+ .subscribe(bManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
val aRefProbe = TestProbe()(system)
val bRefProbe = TestProbe()(systemB)
system.eventStream.subscribe(aRefProbe.ref, classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable])
@@ -200,10 +200,10 @@ class CompressionIntegrationSpec
// listen for compression table events
val aManifestProbe = TestProbe()(system)
val bManifestProbe = TestProbe()(systemB2)
- system.eventStream.subscribe(aManifestProbe.ref,
- classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
- systemB2.eventStream.subscribe(bManifestProbe.ref,
- classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
+ system.eventStream
+ .subscribe(aManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
+ systemB2.eventStream
+ .subscribe(bManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable])
val aRefProbe = TestProbe()(system)
val bRefProbe = TestProbe()(systemB2)
system.eventStream.subscribe(aRefProbe.ref, classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable])
@@ -275,8 +275,9 @@ class CompressionIntegrationSpec
val systemWrap = newRemoteSystem(extraConfig = Some(extraConfig))
val receivedActorRefCompressionTableProbe = TestProbe()(system)
- system.eventStream.subscribe(receivedActorRefCompressionTableProbe.ref,
- classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable])
+ system.eventStream.subscribe(
+ receivedActorRefCompressionTableProbe.ref,
+ classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable])
def createAndIdentify(i: Int) = {
val echoWrap = systemWrap.actorOf(TestActors.echoActorProps, s"echo_$i")
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala
index 446bfac5b6..3787f6893b 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala
@@ -24,10 +24,9 @@ object AllowJavaSerializationOffSpec {
val serializationSettings = SerializationSetup { _ =>
List(SerializerDetails("test", dummySerializer, List(classOf[ProgrammaticDummy])))
}
- val bootstrapSettings = BootstrapSetup(None,
- Some(
- ConfigFactory.parseString(
- """
+ val bootstrapSettings = BootstrapSetup(
+ None,
+ Some(ConfigFactory.parseString("""
akka {
actor {
serialize-messages = off
@@ -40,12 +39,12 @@ object AllowJavaSerializationOffSpec {
}
}
""")),
- None)
+ None)
val actorSystemSettings = ActorSystemSetup(bootstrapSettings, serializationSettings)
- val noJavaSerializationSystem = ActorSystem("AllowJavaSerializationOffSpec" + "NoJavaSerialization",
- ConfigFactory.parseString(
- """
+ val noJavaSerializationSystem = ActorSystem(
+ "AllowJavaSerializationOffSpec" + "NoJavaSerialization",
+ ConfigFactory.parseString("""
akka {
actor {
allow-java-serialization = off
@@ -68,15 +67,18 @@ class AllowJavaSerializationOffSpec
// allow-java-serialization=on to create the SerializationSetup and use that SerializationSetup
// in another system with allow-java-serialization=off
val addedJavaSerializationSettings = SerializationSetup { _ =>
- List(SerializerDetails("test", dummySerializer, List(classOf[ProgrammaticDummy])),
- SerializerDetails("java-manual",
- new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]),
- List(classOf[ProgrammaticJavaDummy])))
+ List(
+ SerializerDetails("test", dummySerializer, List(classOf[ProgrammaticDummy])),
+ SerializerDetails(
+ "java-manual",
+ new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]),
+ List(classOf[ProgrammaticJavaDummy])))
}
- val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(None,
- Some(
- ConfigFactory.parseString(
- """
+ val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(
+ None,
+ Some(
+ ConfigFactory.parseString(
+ """
akka {
loglevel = debug
actor {
@@ -87,7 +89,7 @@ class AllowJavaSerializationOffSpec
}
}
""")),
- None)
+ None)
val dontAllowJavaSystem =
ActorSystem(
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala
index 0c7238b825..f751a33189 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala
@@ -25,26 +25,28 @@ class ArteryMessageSerializerSpec extends AkkaSpec {
val actorA = system.actorOf(Props.empty)
val actorB = system.actorOf(Props.empty)
- Seq("Quarantined" -> Quarantined(uniqueAddress(), uniqueAddress()),
- "ActorSystemTerminating" -> ActorSystemTerminating(uniqueAddress()),
- "ActorSystemTerminatingAck" -> ActorSystemTerminatingAck(uniqueAddress()),
- "HandshakeReq" -> HandshakeReq(uniqueAddress(), uniqueAddress().address),
- "HandshakeRsp" -> HandshakeRsp(uniqueAddress()),
- "ActorRefCompressionAdvertisement" -> ActorRefCompressionAdvertisement(
- uniqueAddress(),
- CompressionTable(17L, 123, Map(actorA -> 123, actorB -> 456, system.deadLetters -> 0))),
- "ActorRefCompressionAdvertisementAck" -> ActorRefCompressionAdvertisementAck(uniqueAddress(), 23),
- "ClassManifestCompressionAdvertisement" -> ClassManifestCompressionAdvertisement(
- uniqueAddress(),
- CompressionTable(17L, 42, Map("a" -> 535, "b" -> 23))),
- "ClassManifestCompressionAdvertisementAck" -> ClassManifestCompressionAdvertisementAck(uniqueAddress(), 23),
- "SystemMessageDelivery.SystemMessageEnvelop" -> SystemMessageDelivery.SystemMessageEnvelope("test",
- 1234567890123L,
- uniqueAddress()),
- "SystemMessageDelivery.Ack" -> SystemMessageDelivery.Ack(98765432109876L, uniqueAddress()),
- "SystemMessageDelivery.Nack" -> SystemMessageDelivery.Nack(98765432109876L, uniqueAddress()),
- "RemoteWatcher.ArteryHeartbeat" -> RemoteWatcher.ArteryHeartbeat,
- "RemoteWatcher.ArteryHeartbeatRsp" -> RemoteWatcher.ArteryHeartbeatRsp(Long.MaxValue)).foreach {
+ Seq(
+ "Quarantined" -> Quarantined(uniqueAddress(), uniqueAddress()),
+ "ActorSystemTerminating" -> ActorSystemTerminating(uniqueAddress()),
+ "ActorSystemTerminatingAck" -> ActorSystemTerminatingAck(uniqueAddress()),
+ "HandshakeReq" -> HandshakeReq(uniqueAddress(), uniqueAddress().address),
+ "HandshakeRsp" -> HandshakeRsp(uniqueAddress()),
+ "ActorRefCompressionAdvertisement" -> ActorRefCompressionAdvertisement(
+ uniqueAddress(),
+ CompressionTable(17L, 123, Map(actorA -> 123, actorB -> 456, system.deadLetters -> 0))),
+ "ActorRefCompressionAdvertisementAck" -> ActorRefCompressionAdvertisementAck(uniqueAddress(), 23),
+ "ClassManifestCompressionAdvertisement" -> ClassManifestCompressionAdvertisement(
+ uniqueAddress(),
+ CompressionTable(17L, 42, Map("a" -> 535, "b" -> 23))),
+ "ClassManifestCompressionAdvertisementAck" -> ClassManifestCompressionAdvertisementAck(uniqueAddress(), 23),
+ "SystemMessageDelivery.SystemMessageEnvelop" -> SystemMessageDelivery.SystemMessageEnvelope(
+ "test",
+ 1234567890123L,
+ uniqueAddress()),
+ "SystemMessageDelivery.Ack" -> SystemMessageDelivery.Ack(98765432109876L, uniqueAddress()),
+ "SystemMessageDelivery.Nack" -> SystemMessageDelivery.Nack(98765432109876L, uniqueAddress()),
+ "RemoteWatcher.ArteryHeartbeat" -> RemoteWatcher.ArteryHeartbeat,
+ "RemoteWatcher.ArteryHeartbeatRsp" -> RemoteWatcher.ArteryHeartbeatRsp(Long.MaxValue)).foreach {
case (scenario, item) =>
s"resolve serializer for $scenario" in {
val serializer = SerializationExtension(system)
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala
index 6296d77544..bdca458d31 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala
@@ -84,10 +84,11 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec with SerializationVerificat
"serialize and de-serialize DaemonMsgCreate with FromClassCreator, with null parameters for Props" in {
verifySerialization {
- DaemonMsgCreate(props = Props(classOf[MyActorWithParam], null),
- deploy = Deploy(),
- path = "foo",
- supervisor = supervisor)
+ DaemonMsgCreate(
+ props = Props(classOf[MyActorWithParam], null),
+ deploy = Deploy(),
+ path = "foo",
+ supervisor = supervisor)
}
}
@@ -99,17 +100,18 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec with SerializationVerificat
"serialize and de-serialize DaemonMsgCreate with FromClassCreator, with function parameters for Props" in {
verifySerialization {
- DaemonMsgCreate(props = Props(classOf[MyActorWithFunParam], (i: Int) => i + 1),
- deploy = Deploy(),
- path = "foo",
- supervisor = supervisor)
+ DaemonMsgCreate(
+ props = Props(classOf[MyActorWithFunParam], (i: Int) => i + 1),
+ deploy = Deploy(),
+ path = "foo",
+ supervisor = supervisor)
}
}
"deserialize the old wire format with just class and field for props parameters (if possible)" in {
- val system = ActorSystem("DaemonMsgCreateSerializer-old-wire-format",
- ConfigFactory.parseString(
- """
+ val system = ActorSystem(
+ "DaemonMsgCreateSerializer-old-wire-format",
+ ConfigFactory.parseString("""
# in 2.4 this is off by default, but in 2.5+ its on so we wouldn't
# get the right set of serializers (and since the old wire protocol doesn't
# contain serializer ids that will go unnoticed with unpleasant consequences)
@@ -156,31 +158,34 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec with SerializationVerificat
// Duration.Inf doesn't equal Duration.Inf, so we use another for test
// we don't serialize the supervisor strategy, but always fallback to default
val supervisorStrategy = SupervisorStrategy.defaultStrategy
- val deploy1 = Deploy(path = "path1",
- config = ConfigFactory.parseString("a=1"),
- routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy),
- scope = RemoteScope(Address("akka", "Test", "host1", 1921)),
- dispatcher = "mydispatcher")
- val deploy2 = Deploy(path = "path2",
- config = ConfigFactory.parseString("a=2"),
- routerConfig = FromConfig,
- scope = RemoteScope(Address("akka", "Test", "host2", 1922)),
- dispatcher = Deploy.NoDispatcherGiven)
- DaemonMsgCreate(props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1),
- deploy = deploy2,
- path = "foo",
- supervisor = supervisor)
+ val deploy1 = Deploy(
+ path = "path1",
+ config = ConfigFactory.parseString("a=1"),
+ routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy),
+ scope = RemoteScope(Address("akka", "Test", "host1", 1921)),
+ dispatcher = "mydispatcher")
+ val deploy2 = Deploy(
+ path = "path2",
+ config = ConfigFactory.parseString("a=2"),
+ routerConfig = FromConfig,
+ scope = RemoteScope(Address("akka", "Test", "host2", 1922)),
+ dispatcher = Deploy.NoDispatcherGiven)
+ DaemonMsgCreate(
+ props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1),
+ deploy = deploy2,
+ path = "foo",
+ supervisor = supervisor)
}
}
"allows for mixing serializers with and without manifests for props parameters" in {
verifySerialization {
DaemonMsgCreate(
- // parameters should trigger JavaSerializer for the first one and additional protobuf for the second (?)
- props = Props(classOf[ActorWithDummyParameter], new DummyParameter("dummy"), system.deadLetters),
- deploy = Deploy(),
- path = "foo",
- supervisor = supervisor)
+ // parameters should trigger JavaSerializer for the first one and additional protobuf for the second (?)
+ props = Props(classOf[ActorWithDummyParameter], new DummyParameter("dummy"), system.deadLetters),
+ deploy = Deploy(),
+ path = "foo",
+ supervisor = supervisor)
}
}
@@ -205,20 +210,23 @@ class DaemonMsgCreateSerializerNoJavaSerializationSpec extends AkkaSpec("""
case _ => SupervisorStrategy.Escalate
}
- val deploy1 = Deploy(path = "path1",
- config = ConfigFactory.parseString("a=1"),
- // a whole can of worms: routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy),
- scope = RemoteScope(Address("akka", "Test", "host1", 1921)),
- dispatcher = "mydispatcher")
- val deploy2 = Deploy(path = "path2",
- config = ConfigFactory.parseString("a=2"),
- routerConfig = FromConfig,
- scope = RemoteScope(Address("akka", "Test", "host2", 1922)),
- dispatcher = Deploy.NoDispatcherGiven)
- DaemonMsgCreate(props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1),
- deploy = deploy2,
- path = "foo",
- supervisor = supervisor)
+ val deploy1 = Deploy(
+ path = "path1",
+ config = ConfigFactory.parseString("a=1"),
+ // a whole can of worms: routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy),
+ scope = RemoteScope(Address("akka", "Test", "host1", 1921)),
+ dispatcher = "mydispatcher")
+ val deploy2 = Deploy(
+ path = "path2",
+ config = ConfigFactory.parseString("a=2"),
+ routerConfig = FromConfig,
+ scope = RemoteScope(Address("akka", "Test", "host2", 1922)),
+ dispatcher = Deploy.NoDispatcherGiven)
+ DaemonMsgCreate(
+ props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1),
+ deploy = deploy2,
+ path = "foo",
+ supervisor = supervisor)
}
}
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala
index 9015117072..04bc4d6c0a 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala
@@ -23,14 +23,16 @@ class MessageContainerSerializerSpec extends AkkaSpec {
"serialize and de-serialize ActorSelectionMessage" in {
verifySerialization(
- ActorSelectionMessage("hello",
- Vector(SelectChildName("user"),
- SelectChildName("a"),
- SelectChildName("b"),
- SelectParent,
- SelectChildPattern("*"),
- SelectChildName("c")),
- wildcardFanOut = true))
+ ActorSelectionMessage(
+ "hello",
+ Vector(
+ SelectChildName("user"),
+ SelectChildName("a"),
+ SelectChildName("b"),
+ SelectParent,
+ SelectChildPattern("*"),
+ SelectChildName("c")),
+ wildcardFanOut = true))
}
def verifySerialization(msg: AnyRef): Unit = {
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala
index e88e9a810a..b1f507bdd6 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala
@@ -70,62 +70,64 @@ class MiscMessageSerializerSpec extends AkkaSpec(MiscMessageSerializerSpec.testC
val ref = system.actorOf(Props.empty, "hello")
"MiscMessageSerializer" must {
- Seq("Identify" -> Identify("some-message"),
- "Identify with None" -> Identify(None),
- "Identify with Some" -> Identify(Some("value")),
- "ActorIdentity without actor ref" -> ActorIdentity("some-message", ref = None),
- "ActorIdentity with actor ref" -> ActorIdentity("some-message", ref = Some(testActor)),
- "TestException" -> new TestException("err"),
- "TestExceptionNoStack" -> new TestExceptionNoStack("err2"),
- "TestException with cause" -> new TestException("err3", new TestException("cause")),
- "Status.Success" -> Status.Success("value"),
- "Status.Failure" -> Status.Failure(new TestException("err")),
- "Status.Failure JavaSer" -> Status.Failure(new OtherException("exc")), // exc with JavaSerializer
- "ActorRef" -> ref,
- "Some" -> Some("value"),
- "None" -> None,
- "Optional.present" -> Optional.of("value2"),
- "Optional.empty" -> Optional.empty(),
- "Kill" -> Kill,
- "PoisonPill" -> PoisonPill,
- "RemoteWatcher.Heartbeat" -> RemoteWatcher.Heartbeat,
- "RemoteWatcher.HertbeatRsp" -> RemoteWatcher.HeartbeatRsp(65537),
- "Done" -> Done,
- "NotUsed" -> NotUsed,
- "Address" -> Address("akka", "system", "host", 1337),
- "UniqueAddress" -> akka.remote.UniqueAddress(Address("akka", "system", "host", 1337), 82751),
- "LocalScope" -> LocalScope,
- "RemoteScope" -> RemoteScope(Address("akka", "system", "localhost", 2525)),
- "Config" -> system.settings.config,
- "Empty Config" -> ConfigFactory.empty(),
- "FromConfig" -> FromConfig,
- // routers
- "DefaultResizer" -> DefaultResizer(),
- "BalancingPool" -> BalancingPool(nrOfInstances = 25),
- "BalancingPool with custom dispatcher" -> BalancingPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"),
- "BroadcastPool" -> BroadcastPool(nrOfInstances = 25),
- "BroadcastPool with custom dispatcher and resizer" -> BroadcastPool(nrOfInstances = 25,
- routerDispatcher = "my-dispatcher",
- usePoolDispatcher = true,
- resizer = Some(DefaultResizer())),
- "RandomPool" -> RandomPool(nrOfInstances = 25),
- "RandomPool with custom dispatcher" -> RandomPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"),
- "RoundRobinPool" -> RoundRobinPool(25),
- "ScatterGatherFirstCompletedPool" -> ScatterGatherFirstCompletedPool(25, within = 3.seconds),
- "TailChoppingPool" -> TailChoppingPool(25, within = 3.seconds, interval = 1.second),
- "RemoteRouterConfig" -> RemoteRouterConfig(local = RandomPool(25),
- nodes = List(Address("akka", "system", "localhost", 2525))))
- .foreach {
- case (scenario, item) =>
- s"resolve serializer for $scenario" in {
- val serializer = SerializationExtension(system)
- serializer.serializerFor(item.getClass).getClass should ===(classOf[MiscMessageSerializer])
- }
+ Seq(
+ "Identify" -> Identify("some-message"),
+ "Identify with None" -> Identify(None),
+ "Identify with Some" -> Identify(Some("value")),
+ "ActorIdentity without actor ref" -> ActorIdentity("some-message", ref = None),
+ "ActorIdentity with actor ref" -> ActorIdentity("some-message", ref = Some(testActor)),
+ "TestException" -> new TestException("err"),
+ "TestExceptionNoStack" -> new TestExceptionNoStack("err2"),
+ "TestException with cause" -> new TestException("err3", new TestException("cause")),
+ "Status.Success" -> Status.Success("value"),
+ "Status.Failure" -> Status.Failure(new TestException("err")),
+ "Status.Failure JavaSer" -> Status.Failure(new OtherException("exc")), // exc with JavaSerializer
+ "ActorRef" -> ref,
+ "Some" -> Some("value"),
+ "None" -> None,
+ "Optional.present" -> Optional.of("value2"),
+ "Optional.empty" -> Optional.empty(),
+ "Kill" -> Kill,
+ "PoisonPill" -> PoisonPill,
+ "RemoteWatcher.Heartbeat" -> RemoteWatcher.Heartbeat,
+ "RemoteWatcher.HertbeatRsp" -> RemoteWatcher.HeartbeatRsp(65537),
+ "Done" -> Done,
+ "NotUsed" -> NotUsed,
+ "Address" -> Address("akka", "system", "host", 1337),
+ "UniqueAddress" -> akka.remote.UniqueAddress(Address("akka", "system", "host", 1337), 82751),
+ "LocalScope" -> LocalScope,
+ "RemoteScope" -> RemoteScope(Address("akka", "system", "localhost", 2525)),
+ "Config" -> system.settings.config,
+ "Empty Config" -> ConfigFactory.empty(),
+ "FromConfig" -> FromConfig,
+ // routers
+ "DefaultResizer" -> DefaultResizer(),
+ "BalancingPool" -> BalancingPool(nrOfInstances = 25),
+ "BalancingPool with custom dispatcher" -> BalancingPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"),
+ "BroadcastPool" -> BroadcastPool(nrOfInstances = 25),
+ "BroadcastPool with custom dispatcher and resizer" -> BroadcastPool(
+ nrOfInstances = 25,
+ routerDispatcher = "my-dispatcher",
+ usePoolDispatcher = true,
+ resizer = Some(DefaultResizer())),
+ "RandomPool" -> RandomPool(nrOfInstances = 25),
+ "RandomPool with custom dispatcher" -> RandomPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"),
+ "RoundRobinPool" -> RoundRobinPool(25),
+ "ScatterGatherFirstCompletedPool" -> ScatterGatherFirstCompletedPool(25, within = 3.seconds),
+ "TailChoppingPool" -> TailChoppingPool(25, within = 3.seconds, interval = 1.second),
+ "RemoteRouterConfig" -> RemoteRouterConfig(
+ local = RandomPool(25),
+ nodes = List(Address("akka", "system", "localhost", 2525)))).foreach {
+ case (scenario, item) =>
+ s"resolve serializer for $scenario" in {
+ val serializer = SerializationExtension(system)
+ serializer.serializerFor(item.getClass).getClass should ===(classOf[MiscMessageSerializer])
+ }
- s"serialize and de-serialize $scenario" in {
- verifySerialization(item)
- }
- }
+ s"serialize and de-serialize $scenario" in {
+ verifySerialization(item)
+ }
+ }
"reject invalid manifest" in {
intercept[IllegalArgumentException] {
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala
index a36e640372..14d8b0771d 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala
@@ -115,10 +115,11 @@ class PrimitivesSerializationSpec extends AkkaSpec(PrimitivesSerializationSpec.t
}
"ByteStringSerializer" must {
- Seq("empty string" -> ByteString.empty,
- "simple content" -> ByteString("hello"),
- "concatenated content" -> (ByteString("hello") ++ ByteString("world")),
- "sliced content" -> ByteString("helloabc").take(5)).foreach {
+ Seq(
+ "empty string" -> ByteString.empty,
+ "simple content" -> ByteString("hello"),
+ "concatenated content" -> (ByteString("hello") ++ ByteString("world")),
+ "sliced content" -> ByteString("helloabc").take(5)).foreach {
case (scenario, item) =>
s"resolve serializer for [$scenario]" in {
val serializer = SerializationExtension(system)
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala
index 66b4db146e..87a332d4e8 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala
@@ -32,17 +32,18 @@ class SystemMessageSerializationSpec extends AkkaSpec(PrimitivesSerializationSpe
val testRef2 = TestProbe().ref.asInstanceOf[InternalActorRef]
"ByteStringSerializer" must {
- Seq("Create(None)" -> Create(None),
- "Recreate(ex)" -> Recreate(new TestException("test2")),
- "Suspend()" -> Suspend(),
- "Resume(ex)" -> Resume(new TestException("test3")),
- "Terminate()" -> Terminate(),
- "Supervise(ref, async)" -> Supervise(testRef, async = true),
- "Watch(ref, ref)" -> Watch(testRef, testRef2),
- "Unwatch(ref, ref)" -> Unwatch(testRef, testRef2),
- "Failed(ref, ex, uid)" -> Failed(testRef, new TestException("test4"), 42),
- "DeathWatchNotification(ref, confimed, addressTerminated)" ->
- DeathWatchNotification(testRef, existenceConfirmed = true, addressTerminated = true)).foreach {
+ Seq(
+ "Create(None)" -> Create(None),
+ "Recreate(ex)" -> Recreate(new TestException("test2")),
+ "Suspend()" -> Suspend(),
+ "Resume(ex)" -> Resume(new TestException("test3")),
+ "Terminate()" -> Terminate(),
+ "Supervise(ref, async)" -> Supervise(testRef, async = true),
+ "Watch(ref, ref)" -> Watch(testRef, testRef2),
+ "Unwatch(ref, ref)" -> Unwatch(testRef, testRef2),
+ "Failed(ref, ex, uid)" -> Failed(testRef, new TestException("test4"), 42),
+ "DeathWatchNotification(ref, confimed, addressTerminated)" ->
+ DeathWatchNotification(testRef, existenceConfirmed = true, addressTerminated = true)).foreach {
case (scenario, item) =>
s"resolve serializer for [$scenario]" in {
val serializer = SerializationExtension(system)
diff --git a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala
index fc765a4cc7..3f47e373a8 100644
--- a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala
@@ -139,12 +139,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val (failureDetector, _, _, handle) = collaborators
system.actorOf(
- ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- handle,
- ActorAssociationEventListener(testActor),
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector))
+ ProtocolStateActor.inboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ handle,
+ ActorAssociationEventListener(testActor),
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector))
awaitCond(handle.readHandlerPromise.isCompleted)
}
@@ -153,12 +154,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val (failureDetector, registry, _, handle) = collaborators
val reader = system.actorOf(
- ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- handle,
- ActorAssociationEventListener(testActor),
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector))
+ ProtocolStateActor.inboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ handle,
+ ActorAssociationEventListener(testActor),
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector))
reader ! testAssociate(uid = 33, cookie = None)
@@ -188,12 +190,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val (failureDetector, registry, _, handle) = collaborators
val reader = system.actorOf(
- ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- handle,
- ActorAssociationEventListener(testActor),
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector))
+ ProtocolStateActor.inboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ handle,
+ ActorAssociationEventListener(testActor),
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector))
// a stray message will force a disassociate
reader ! testHeartbeat
@@ -214,14 +217,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val statusPromise: Promise[AssociationHandle] = Promise()
val reader = system.actorOf(
- ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- remoteAddress,
- statusPromise,
- transport,
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector,
- refuseUid = None))
+ ProtocolStateActor.outboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ remoteAddress,
+ statusPromise,
+ transport,
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector,
+ refuseUid = None))
awaitCond(lastActivityIsAssociate(registry, 42, None))
failureDetector.called should ===(true)
@@ -249,15 +253,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val (failureDetector, registry, _, handle) = collaborators
val reader = system.actorOf(
- ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")),
- handle,
- ActorAssociationEventListener(testActor),
- new AkkaProtocolSettings(
- ConfigFactory
- .parseString("akka.remote.require-cookie = on")
- .withFallback(conf)),
- codec,
- failureDetector))
+ ProtocolStateActor.inboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")),
+ handle,
+ ActorAssociationEventListener(testActor),
+ new AkkaProtocolSettings(ConfigFactory.parseString("akka.remote.require-cookie = on").withFallback(conf)),
+ codec,
+ failureDetector))
reader ! testAssociate(uid = 33, Some("xyzzy"))
@@ -271,15 +273,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val (failureDetector, registry, _, handle) = collaborators
val reader = system.actorOf(
- ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")),
- handle,
- ActorAssociationEventListener(testActor),
- new AkkaProtocolSettings(
- ConfigFactory
- .parseString("akka.remote.require-cookie = on")
- .withFallback(conf)),
- codec,
- failureDetector))
+ ProtocolStateActor.inboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")),
+ handle,
+ ActorAssociationEventListener(testActor),
+ new AkkaProtocolSettings(ConfigFactory.parseString("akka.remote.require-cookie = on").withFallback(conf)),
+ codec,
+ failureDetector))
// Send the correct cookie
reader ! testAssociate(uid = 33, Some("abcde"))
@@ -306,17 +306,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val statusPromise: Promise[AssociationHandle] = Promise()
system.actorOf(
- ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")),
- remoteAddress,
- statusPromise,
- transport,
- new AkkaProtocolSettings(
- ConfigFactory
- .parseString("akka.remote.require-cookie = on")
- .withFallback(conf)),
- codec,
- failureDetector,
- refuseUid = None))
+ ProtocolStateActor.outboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")),
+ remoteAddress,
+ statusPromise,
+ transport,
+ new AkkaProtocolSettings(ConfigFactory.parseString("akka.remote.require-cookie = on").withFallback(conf)),
+ codec,
+ failureDetector,
+ refuseUid = None))
awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = Some("abcde")))
}
@@ -328,14 +326,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val statusPromise: Promise[AssociationHandle] = Promise()
val reader = system.actorOf(
- ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- remoteAddress,
- statusPromise,
- transport,
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector,
- refuseUid = None))
+ ProtocolStateActor.outboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ remoteAddress,
+ statusPromise,
+ transport,
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector,
+ refuseUid = None))
awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None))
@@ -364,14 +363,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val statusPromise: Promise[AssociationHandle] = Promise()
val reader = system.actorOf(
- ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- remoteAddress,
- statusPromise,
- transport,
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector,
- refuseUid = None))
+ ProtocolStateActor.outboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ remoteAddress,
+ statusPromise,
+ transport,
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector,
+ refuseUid = None))
awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None))
@@ -400,14 +400,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val statusPromise: Promise[AssociationHandle] = Promise()
val stateActor = system.actorOf(
- ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- remoteAddress,
- statusPromise,
- transport,
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector,
- refuseUid = None))
+ ProtocolStateActor.outboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ remoteAddress,
+ statusPromise,
+ transport,
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector,
+ refuseUid = None))
awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None))
@@ -439,14 +440,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val statusPromise: Promise[AssociationHandle] = Promise()
val stateActor = system.actorOf(
- ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- remoteAddress,
- statusPromise,
- transport,
- new AkkaProtocolSettings(conf),
- codec,
- failureDetector,
- refuseUid = None))
+ ProtocolStateActor.outboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ remoteAddress,
+ statusPromise,
+ transport,
+ new AkkaProtocolSettings(conf),
+ codec,
+ failureDetector,
+ refuseUid = None))
awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None))
@@ -479,14 +481,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val conf2 = ConfigFactory.parseString("akka.remote.netty.tcp.connection-timeout = 500 ms").withFallback(conf)
val stateActor = system.actorOf(
- ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- remoteAddress,
- statusPromise,
- transport,
- new AkkaProtocolSettings(conf2),
- codec,
- failureDetector,
- refuseUid = None))
+ ProtocolStateActor.outboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ remoteAddress,
+ statusPromise,
+ transport,
+ new AkkaProtocolSettings(conf2),
+ codec,
+ failureDetector,
+ refuseUid = None))
watch(stateActor)
intercept[TimeoutException] {
@@ -501,12 +504,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit
val conf2 = ConfigFactory.parseString("akka.remote.netty.tcp.connection-timeout = 500 ms").withFallback(conf)
val reader = system.actorOf(
- ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
- handle,
- ActorAssociationEventListener(testActor),
- new AkkaProtocolSettings(conf2),
- codec,
- failureDetector))
+ ProtocolStateActor.inboundProps(
+ HandshakeInfo(origin = localAddress, uid = 42, cookie = None),
+ handle,
+ ActorAssociationEventListener(testActor),
+ new AkkaProtocolSettings(conf2),
+ codec,
+ failureDetector))
watch(reader)
expectTerminated(reader)
diff --git a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala
index 534042ef1b..f8a1aa1ec8 100644
--- a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala
+++ b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala
@@ -126,9 +126,10 @@ class AkkaProtocolStressTest extends AkkaSpec(configA) with ImplicitSender with
EventFilter.warning(source = "akka://AkkaProtocolStressTest/user/$a", start = "received dead letter"),
EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
systemB.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointException](),
- EventFilter.error(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
+ TestEvent.Mute(
+ EventFilter[EndpointException](),
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
}
override def afterTermination(): Unit = shutdown(systemB)
diff --git a/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala
index cac84bb088..f640f2441e 100644
--- a/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala
@@ -33,10 +33,11 @@ abstract class GenericTransportSpec(withAkkaProtocol: Boolean = false)
def wrapTransport(transport: Transport): Transport =
if (withAkkaProtocol) {
val provider = system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider]
- new AkkaProtocolTransport(transport,
- system,
- new AkkaProtocolSettings(provider.remoteSettings.config),
- AkkaPduProtobufCodec)
+ new AkkaProtocolTransport(
+ transport,
+ system,
+ new AkkaProtocolSettings(provider.remoteSettings.config),
+ AkkaPduProtobufCodec)
} else transport
def newTransportA(registry: AssociationRegistry): Transport =
diff --git a/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala b/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala
index 808407326a..ad69d4822c 100644
--- a/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala
+++ b/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala
@@ -132,13 +132,15 @@ abstract class SystemMessageDeliveryStressTest(msg: String, cfg: String)
override def atStartup() = {
systemA.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointException](),
- EventFilter.error(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead .*")))
+ TestEvent.Mute(
+ EventFilter[EndpointException](),
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead .*")))
systemB.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointException](),
- EventFilter.error(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead .*")))
+ TestEvent.Mute(
+ EventFilter[EndpointException](),
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead .*")))
systemA.eventStream.subscribe(probeA.ref, classOf[QuarantinedEvent])
systemB.eventStream.subscribe(probeB.ref, classOf[QuarantinedEvent])
@@ -184,9 +186,10 @@ abstract class SystemMessageDeliveryStressTest(msg: String, cfg: String)
EventFilter.warning(source = s"akka://AkkaProtocolStressTest/user/$$a", start = "received dead letter"),
EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
systemB.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointException](),
- EventFilter.error(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
+ TestEvent.Mute(
+ EventFilter[EndpointException](),
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
}
override def afterTermination(): Unit = shutdown(systemB)
diff --git a/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala
index 73290564d5..7fa61b2c19 100644
--- a/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala
@@ -145,9 +145,10 @@ class ThrottlerTransportAdapterSpec extends AkkaSpec(configA) with ImplicitSende
EventFilter.warning(source = "akka://AkkaProtocolStressTest/user/$a", start = "received dead letter"),
EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
systemB.eventStream.publish(
- TestEvent.Mute(EventFilter[EndpointException](),
- EventFilter.error(start = "AssociationError"),
- EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
+ TestEvent.Mute(
+ EventFilter[EndpointException](),
+ EventFilter.error(start = "AssociationError"),
+ EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)")))
}
override def afterTermination(): Unit = shutdown(systemB)
diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala
index 8adde956d3..2d9e571040 100644
--- a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala
+++ b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala
@@ -67,9 +67,10 @@ class Slf4jLogger extends Actor with SLF4JLogging with RequiresMessageQueue[Logg
case Error.NoCause | null =>
Logger(logClass, logSource).error(markerIfPresent(event), if (message != null) message.toString else null)
case _ =>
- Logger(logClass, logSource).error(markerIfPresent(event),
- if (message != null) message.toString else cause.getLocalizedMessage,
- cause)
+ Logger(logClass, logSource).error(
+ markerIfPresent(event),
+ if (message != null) message.toString else cause.getLocalizedMessage,
+ cause)
}
}
@@ -77,9 +78,10 @@ class Slf4jLogger extends Actor with SLF4JLogging with RequiresMessageQueue[Logg
withMdc(logSource, event) {
event match {
case e: LogEventWithCause =>
- Logger(logClass, logSource).warn(markerIfPresent(event),
- if (message != null) message.toString else e.cause.getLocalizedMessage,
- e.cause)
+ Logger(logClass, logSource).warn(
+ markerIfPresent(event),
+ if (message != null) message.toString else e.cause.getLocalizedMessage,
+ e.cause)
case _ =>
Logger(logClass, logSource).warn(markerIfPresent(event), if (message != null) message.toString else null)
}
diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala
index 4e023c014e..0c4a5675f7 100644
--- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala
+++ b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala
@@ -133,9 +133,10 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft
"log info with slf4j marker and MDC" in {
val slf4jMarker = MarkerFactory.getMarker("SLF")
slf4jMarker.add(MarkerFactory.getMarker("ADDED")) // slf4j markers can have children
- producer ! StringWithSlf4jMarkerMDC("security-wise interesting message",
- slf4jMarker,
- Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values"))
+ producer ! StringWithSlf4jMarkerMDC(
+ "security-wise interesting message",
+ slf4jMarker,
+ Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values"))
awaitCond(outputString.contains("----"), 5 seconds)
val s = outputString
@@ -145,8 +146,9 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft
}
"put custom MDC values when specified" in {
- producer ! StringWithMDC("Message with custom MDC values",
- Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values"))
+ producer ! StringWithMDC(
+ "Message with custom MDC values",
+ Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values"))
awaitCond(outputString.contains("----"), 5 seconds)
val s = outputString
diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala
index 159d8350fc..ac7e0e894e 100644
--- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala
+++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala
@@ -107,8 +107,9 @@ object TestPublisher {
}
def executeAfterSubscription[T](f: => T): T = {
- subscribed.await(probe.testKitSettings.DefaultTimeout.duration.length,
- probe.testKitSettings.DefaultTimeout.duration.unit)
+ subscribed.await(
+ probe.testKitSettings.DefaultTimeout.duration.length,
+ probe.testKitSettings.DefaultTimeout.duration.unit)
f
}
@@ -167,9 +168,10 @@ object TestPublisher {
/**
* Receive messages for a given duration or until one does not match a given partial function.
*/
- def receiveWhile[T](max: Duration = Duration.Undefined,
- idle: Duration = Duration.Inf,
- messages: Int = Int.MaxValue)(f: PartialFunction[PublisherEvent, T]): immutable.Seq[T] =
+ def receiveWhile[T](
+ max: Duration = Duration.Undefined,
+ idle: Duration = Duration.Inf,
+ messages: Int = Int.MaxValue)(f: PartialFunction[PublisherEvent, T]): immutable.Seq[T] =
executeAfterSubscription { probe.receiveWhile(max, idle, messages)(f.asInstanceOf[PartialFunction[AnyRef, T]]) }
def expectEventPF[T](f: PartialFunction[PublisherEvent, T]): T =
@@ -674,9 +676,10 @@ object TestSubscriber {
/**
* Receive messages for a given duration or until one does not match a given partial function.
*/
- def receiveWhile[T](max: Duration = Duration.Undefined,
- idle: Duration = Duration.Inf,
- messages: Int = Int.MaxValue)(f: PartialFunction[SubscriberEvent, T]): immutable.Seq[T] =
+ def receiveWhile[T](
+ max: Duration = Duration.Undefined,
+ idle: Duration = Duration.Inf,
+ messages: Int = Int.MaxValue)(f: PartialFunction[SubscriberEvent, T]): immutable.Seq[T] =
probe.receiveWhile(max, idle, messages)(f.asInstanceOf[PartialFunction[AnyRef, T]])
/**
diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala
index d67d10baaa..785b8badc4 100644
--- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala
+++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala
@@ -45,8 +45,9 @@ object TestSinkStage {
Sink.fromGraph(new TestSinkStage(stageUnderTest, probe))
}
-private[testkit] class TestSinkStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M],
- probe: TestProbe)
+private[testkit] class TestSinkStage[T, M](
+ stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M],
+ probe: TestProbe)
extends GraphStageWithMaterializedValue[SinkShape[T], M] {
val in = Inlet[T]("testSinkStage.in")
@@ -108,8 +109,9 @@ object TestSourceStage {
Source.fromGraph(new TestSourceStage(stageUnderTest, probe))
}
-private[testkit] class TestSourceStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M],
- probe: TestProbe)
+private[testkit] class TestSourceStage[T, M](
+ stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M],
+ probe: TestProbe)
extends GraphStageWithMaterializedValue[SourceShape[T], M] {
val out = Outlet[T]("testSourceStage.out")
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala
index 363e7ba4ed..791ee5ff5f 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala
@@ -17,15 +17,17 @@ class ChainSetup[In, Out, M](
materializer: ActorMaterializer,
toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit val system: ActorSystem) {
- def this(stream: Flow[In, In, NotUsed] => Flow[In, Out, M],
- settings: ActorMaterializerSettings,
- toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) =
+ def this(
+ stream: Flow[In, In, NotUsed] => Flow[In, Out, M],
+ settings: ActorMaterializerSettings,
+ toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) =
this(stream, settings, ActorMaterializer(settings)(system), toPublisher)(system)
- def this(stream: Flow[In, In, NotUsed] => Flow[In, Out, M],
- settings: ActorMaterializerSettings,
- materializerCreator: (ActorMaterializerSettings, ActorRefFactory) => ActorMaterializer,
- toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) =
+ def this(
+ stream: Flow[In, In, NotUsed] => Flow[In, Out, M],
+ settings: ActorMaterializerSettings,
+ materializerCreator: (ActorMaterializerSettings, ActorRefFactory) => ActorMaterializer,
+ toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) =
this(stream, settings, materializerCreator(settings, system), toPublisher)(system)
val upstream = TestPublisher.manualProbe[In]()
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala
index f0d5675b4c..c369e33fa5 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala
@@ -36,60 +36,66 @@ trait ScriptedTest extends Matchers {
jumps ++= Vector.fill(ins.size - 1)(0) ++ Vector(outs.size)
}
- new Script(providedInputs,
- expectedOutputs,
- jumps,
- inputCursor = 0,
- outputCursor = 0,
- outputEndCursor = 0,
- completed = false)
+ new Script(
+ providedInputs,
+ expectedOutputs,
+ jumps,
+ inputCursor = 0,
+ outputCursor = 0,
+ outputEndCursor = 0,
+ completed = false)
}
}
- final class Script[In, Out](val providedInputs: Vector[In],
- val expectedOutputs: Vector[Out],
- val jumps: Vector[Int],
- val inputCursor: Int,
- val outputCursor: Int,
- val outputEndCursor: Int,
- val completed: Boolean) {
+ final class Script[In, Out](
+ val providedInputs: Vector[In],
+ val expectedOutputs: Vector[Out],
+ val jumps: Vector[Int],
+ val inputCursor: Int,
+ val outputCursor: Int,
+ val outputEndCursor: Int,
+ val completed: Boolean) {
require(jumps.size == providedInputs.size)
def provideInput: (In, Script[In, Out]) =
if (noInsPending)
throw new ScriptException("Script cannot provide more input.")
else
- (providedInputs(inputCursor),
- new Script(providedInputs,
- expectedOutputs,
- jumps,
- inputCursor = inputCursor + 1,
- outputCursor,
- outputEndCursor = outputEndCursor + jumps(inputCursor),
- completed))
+ (
+ providedInputs(inputCursor),
+ new Script(
+ providedInputs,
+ expectedOutputs,
+ jumps,
+ inputCursor = inputCursor + 1,
+ outputCursor,
+ outputEndCursor = outputEndCursor + jumps(inputCursor),
+ completed))
def consumeOutput(out: Out): Script[In, Out] = {
if (noOutsPending)
throw new ScriptException(s"Tried to produce element ${out} but no elements should be produced right now.")
out should be(expectedOutputs(outputCursor))
- new Script(providedInputs,
- expectedOutputs,
- jumps,
- inputCursor,
- outputCursor = outputCursor + 1,
- outputEndCursor,
- completed)
+ new Script(
+ providedInputs,
+ expectedOutputs,
+ jumps,
+ inputCursor,
+ outputCursor = outputCursor + 1,
+ outputEndCursor,
+ completed)
}
def complete(): Script[In, Out] = {
if (finished)
- new Script(providedInputs,
- expectedOutputs,
- jumps,
- inputCursor,
- outputCursor = outputCursor + 1,
- outputEndCursor,
- completed = true)
+ new Script(
+ providedInputs,
+ expectedOutputs,
+ jumps,
+ inputCursor,
+ outputCursor = outputCursor + 1,
+ outputEndCursor,
+ completed = true)
else fail("received onComplete prematurely")
}
@@ -111,12 +117,13 @@ trait ScriptedTest extends Matchers {
.mkString("/")}, remainingOuts=${expectedOutputs.drop(outputCursor).mkString("/")})"
}
- class ScriptRunner[In, Out, M](op: Flow[In, In, NotUsed] => Flow[In, Out, M],
- settings: ActorMaterializerSettings,
- script: Script[In, Out],
- maximumOverrun: Int,
- maximumRequest: Int,
- maximumBuffer: Int)(implicit _system: ActorSystem)
+ class ScriptRunner[In, Out, M](
+ op: Flow[In, In, NotUsed] => Flow[In, Out, M],
+ settings: ActorMaterializerSettings,
+ script: Script[In, Out],
+ maximumOverrun: Int,
+ maximumRequest: Int,
+ maximumBuffer: Int)(implicit _system: ActorSystem)
extends ChainSetup(op, settings, toPublisher) {
var _debugLog = Vector.empty[String]
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala
index 718a70a34d..1fed257d32 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala
@@ -29,14 +29,16 @@ private[akka] final case class StreamTestDefaultMailbox()
owner match {
case Some(r: ActorRefWithCell) =>
val actorClass = r.underlying.props.actorClass
- assert(actorClass != classOf[Actor],
- s"Don't use anonymous actor classes, actor class for $r was [${actorClass.getName}]")
+ assert(
+ actorClass != classOf[Actor],
+ s"Don't use anonymous actor classes, actor class for $r was [${actorClass.getName}]")
// StreamTcpManager is allowed to use another dispatcher
- assert(!actorClass.getName.startsWith("akka.stream."),
- s"$r with actor class [${actorClass.getName}] must not run on default dispatcher in tests. " +
- "Did you forget to define `props.withDispatcher` when creating the actor? " +
- "Or did you forget to configure the `akka.stream.materializer` setting accordingly or force the " +
- """dispatcher using `ActorMaterializerSettings(sys).withDispatcher("akka.test.stream-dispatcher")` in the test?""")
+ assert(
+ !actorClass.getName.startsWith("akka.stream."),
+ s"$r with actor class [${actorClass.getName}] must not run on default dispatcher in tests. " +
+ "Did you forget to define `props.withDispatcher` when creating the actor? " +
+ "Or did you forget to configure the `akka.stream.materializer` setting accordingly or force the " +
+ """dispatcher using `ActorMaterializerSettings(sys).withDispatcher("akka.test.stream-dispatcher")` in the test?""")
case _ =>
}
new UnboundedMailbox.MessageQueue
diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala
index 3e5587066d..0b8c2e6765 100644
--- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala
+++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala
@@ -36,9 +36,10 @@ trait ActorSystemLifecycle {
Await.ready(system.terminate(), shutdownTimeout)
} catch {
case _: TimeoutException =>
- val msg = "Failed to stop [%s] within [%s] \n%s".format(system.name,
- shutdownTimeout,
- system.asInstanceOf[ActorSystemImpl].printTree)
+ val msg = "Failed to stop [%s] within [%s] \n%s".format(
+ system.name,
+ shutdownTimeout,
+ system.asInstanceOf[ActorSystemImpl].printTree)
throw new RuntimeException(msg)
}
}
diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala
index a834c7b0a3..145fffa76a 100644
--- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala
+++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala
@@ -20,8 +20,9 @@ abstract class AkkaIdentityProcessorVerification[T](env: TestEnvironment, publis
with ActorSystemLifecycle {
def this(printlnDebug: Boolean) =
- this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug),
- Timeouts.publisherShutdownTimeoutMillis)
+ this(
+ new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug),
+ Timeouts.publisherShutdownTimeoutMillis)
def this() = this(false)
diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala
index b9ba15a31c..6b8d8889c8 100644
--- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala
+++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala
@@ -18,8 +18,9 @@ abstract class AkkaPublisherVerification[T](val env: TestEnvironment, publisherS
with ActorSystemLifecycle {
def this(printlnDebug: Boolean) =
- this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug),
- Timeouts.publisherShutdownTimeoutMillis)
+ this(
+ new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug),
+ Timeouts.publisherShutdownTimeoutMillis)
def this() = this(false)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala
index 5343c8c150..56e3e94251 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala
@@ -85,8 +85,9 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender {
"handle properly broken Props" in {
val m = ActorMaterializer.create(system)
an[IllegalArgumentException] should be thrownBy
- Await.result(Source.actorPublisher(Props(classOf[TestActor], "wrong", "arguments")).runWith(Sink.head)(m),
- 3.seconds)
+ Await.result(
+ Source.actorPublisher(Props(classOf[TestActor], "wrong", "arguments")).runWith(Sink.head)(m),
+ 3.seconds)
}
"report correctly if it has been shut down from the side" in {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala
index 73e2747fb9..841bc29fc5 100755
--- a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala
@@ -11,13 +11,15 @@ import org.scalatest.WordSpec
object DslConsistencySpec {
class ScalaSubSource[Out, Mat]
- extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]](null,
- null,
- null)
+ extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]](
+ null,
+ null,
+ null)
class ScalaSubFlow[In, Out, Mat]
- extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]](null,
- null,
- null)
+ extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]](
+ null,
+ null,
+ null)
}
class DslConsistencySpec extends WordSpec with Matchers {
@@ -44,52 +46,45 @@ class DslConsistencySpec extends WordSpec with Matchers {
Set("equals", "hashCode", "notify", "notifyAll", "wait", "toString", "getClass") ++
Set("productArity", "canEqual", "productPrefix", "copy", "productIterator", "productElement") ++
Set("productElementName", "productElementNames") ++
- Set("create",
- "apply",
- "ops",
- "appendJava",
- "andThen",
- "andThenMat",
- "isIdentity",
- "withAttributes",
- "transformMaterializing") ++
+ Set(
+ "create",
+ "apply",
+ "ops",
+ "appendJava",
+ "andThen",
+ "andThenMat",
+ "isIdentity",
+ "withAttributes",
+ "transformMaterializing") ++
Set("asScala", "asJava", "deprecatedAndThen", "deprecatedAndThenMat")
- val graphHelpers = Set("zipGraph",
- "zipWithGraph",
- "zipLatestGraph",
- "zipLatestWithGraph",
- "mergeGraph",
- "mergeSortedGraph",
- "interleaveGraph",
- "concatGraph",
- "prependGraph",
- "alsoToGraph",
- "wireTapGraph",
- "orElseGraph",
- "divertToGraph")
+ val graphHelpers = Set(
+ "zipGraph",
+ "zipWithGraph",
+ "zipLatestGraph",
+ "zipLatestWithGraph",
+ "mergeGraph",
+ "mergeSortedGraph",
+ "interleaveGraph",
+ "concatGraph",
+ "prependGraph",
+ "alsoToGraph",
+ "wireTapGraph",
+ "orElseGraph",
+ "divertToGraph")
- val allowMissing: Map[Class[_], Set[String]] = Map(jFlowClass -> graphHelpers,
- jSourceClass -> (graphHelpers ++ Set("watch", "ask")),
- // Java subflows can only be nested using .via and .to (due to type system restrictions)
- jSubFlowClass -> (graphHelpers ++ Set("groupBy",
- "splitAfter",
- "splitWhen",
- "subFlow",
- "watch",
- "ask")),
- jSubSourceClass -> (graphHelpers ++ Set("groupBy",
- "splitAfter",
- "splitWhen",
- "subFlow",
- "watch",
- "ask")),
- sFlowClass -> Set("of"),
- sSourceClass -> Set("adapt", "from", "watch"),
- sSinkClass -> Set("adapt"),
- sSubFlowClass -> Set(),
- sSubSourceClass -> Set(),
- sRunnableGraphClass -> Set("builder"))
+ val allowMissing: Map[Class[_], Set[String]] = Map(
+ jFlowClass -> graphHelpers,
+ jSourceClass -> (graphHelpers ++ Set("watch", "ask")),
+ // Java subflows can only be nested using .via and .to (due to type system restrictions)
+ jSubFlowClass -> (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow", "watch", "ask")),
+ jSubSourceClass -> (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow", "watch", "ask")),
+ sFlowClass -> Set("of"),
+ sSourceClass -> Set("adapt", "from", "watch"),
+ sSinkClass -> Set("adapt"),
+ sSubFlowClass -> Set(),
+ sSubSourceClass -> Set(),
+ sRunnableGraphClass -> Set("builder"))
def materializing(m: Method): Boolean = m.getParameterTypes.contains(classOf[ActorMaterializer])
diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala
index 77038600ec..f20c1cc7ff 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala
@@ -12,15 +12,16 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers {
// configuration //
val scalaIgnore =
- Set("equals",
- "hashCode",
- "notify",
- "notifyAll",
- "wait",
- "toString",
- "getClass",
- "shape",
- "identityTraversalBuilder")
+ Set(
+ "equals",
+ "hashCode",
+ "notify",
+ "notifyAll",
+ "wait",
+ "toString",
+ "getClass",
+ "shape",
+ "identityTraversalBuilder")
val javaIgnore =
Set("adapt") // the scaladsl -> javadsl bridge
@@ -77,35 +78,32 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers {
TestCase(name, Some(sClass), Some(jClass), None)
}
- val testCases = Seq(TestCase("Source", scaladsl.Source.getClass, javadsl.Source.getClass),
- TestCase("Flow", scaladsl.Flow.getClass, javadsl.Flow.getClass),
- TestCase("Sink", scaladsl.Sink.getClass, javadsl.Sink.getClass),
- TestCase("BidiFlow", scaladsl.BidiFlow.getClass, javadsl.BidiFlow.getClass),
- TestCase("GraphDSL",
- scaladsl.GraphDSL.getClass,
- javadsl.GraphDSL.getClass,
- classOf[javadsl.GraphCreate]),
- TestCase("ZipWith", Some(scaladsl.ZipWith.getClass), None, Some(javadsl.ZipWith.getClass)),
- TestCase("Merge", scaladsl.Merge.getClass, javadsl.Merge.getClass),
- TestCase("MergePreferred", scaladsl.MergePreferred.getClass, javadsl.MergePreferred.getClass),
- TestCase("Broadcast", scaladsl.Broadcast.getClass, javadsl.Broadcast.getClass),
- TestCase("Balance", scaladsl.Balance.getClass, javadsl.Balance.getClass),
- TestCase("Zip", scaladsl.Zip.getClass, javadsl.Zip.getClass),
- TestCase("UnZip", scaladsl.Unzip.getClass, javadsl.Unzip.getClass),
- TestCase("Concat", scaladsl.Concat.getClass, javadsl.Concat.getClass),
- TestCase("FileIO", scaladsl.FileIO.getClass, javadsl.FileIO.getClass),
- TestCase("StreamConverters",
- scaladsl.StreamConverters.getClass,
- javadsl.StreamConverters.getClass))
+ val testCases = Seq(
+ TestCase("Source", scaladsl.Source.getClass, javadsl.Source.getClass),
+ TestCase("Flow", scaladsl.Flow.getClass, javadsl.Flow.getClass),
+ TestCase("Sink", scaladsl.Sink.getClass, javadsl.Sink.getClass),
+ TestCase("BidiFlow", scaladsl.BidiFlow.getClass, javadsl.BidiFlow.getClass),
+ TestCase("GraphDSL", scaladsl.GraphDSL.getClass, javadsl.GraphDSL.getClass, classOf[javadsl.GraphCreate]),
+ TestCase("ZipWith", Some(scaladsl.ZipWith.getClass), None, Some(javadsl.ZipWith.getClass)),
+ TestCase("Merge", scaladsl.Merge.getClass, javadsl.Merge.getClass),
+ TestCase("MergePreferred", scaladsl.MergePreferred.getClass, javadsl.MergePreferred.getClass),
+ TestCase("Broadcast", scaladsl.Broadcast.getClass, javadsl.Broadcast.getClass),
+ TestCase("Balance", scaladsl.Balance.getClass, javadsl.Balance.getClass),
+ TestCase("Zip", scaladsl.Zip.getClass, javadsl.Zip.getClass),
+ TestCase("UnZip", scaladsl.Unzip.getClass, javadsl.Unzip.getClass),
+ TestCase("Concat", scaladsl.Concat.getClass, javadsl.Concat.getClass),
+ TestCase("FileIO", scaladsl.FileIO.getClass, javadsl.FileIO.getClass),
+ TestCase("StreamConverters", scaladsl.StreamConverters.getClass, javadsl.StreamConverters.getClass))
"Java DSL" must provide {
testCases.foreach {
case TestCase(name, Some(sClass), jClass, jFactoryOption) =>
name.which {
s"allows creating the same ${name}s as Scala DSL" in {
- runSpec(getSMethods(sClass),
- jClass.toList.flatMap(getJMethods) ++
- jFactoryOption.toList.flatMap(f => getJMethods(f).map(unspecializeName.andThen(curryLikeJava))))
+ runSpec(
+ getSMethods(sClass),
+ jClass.toList.flatMap(getJMethods) ++
+ jFactoryOption.toList.flatMap(f => getJMethods(f).map(unspecializeName.andThen(curryLikeJava))))
}
}
}
@@ -121,31 +119,33 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers {
private def toMethod(m: java.lang.reflect.Method): Method =
Method(m.getName, List(m.getParameterTypes: _*), m.getReturnType, m.getDeclaringClass)
- private case class Ignore(cls: Class[_] => Boolean,
- name: String => Boolean,
- parameters: Int => Boolean,
- paramTypes: List[Class[_]] => Boolean)
+ private case class Ignore(
+ cls: Class[_] => Boolean,
+ name: String => Boolean,
+ parameters: Int => Boolean,
+ paramTypes: List[Class[_]] => Boolean)
private def ignore(m: Method): Boolean = {
val ignores = Seq(
- // private scaladsl method
- Ignore(_ == akka.stream.scaladsl.Source.getClass,
- _ == "apply",
- _ == 1,
- _ == List(classOf[akka.stream.impl.SourceModule[_, _]])),
- // corresponding matches on java side would need to have Function23
- Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "apply", _ == 24, _ => true),
- Ignore(_ == akka.stream.scaladsl.Flow.getClass, _ == "apply", _ == 24, _ => true),
- Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "apply", _ == 24, _ => true),
- Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "collection", _ => true, _ => true),
- Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl
- Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRefWithAck", _ => true, _ => true), // Internal in scaladsl
- Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl
- Ignore(_ == akka.stream.scaladsl.BidiFlow.getClass, _ == "apply", _ == 24, _ => true),
- Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "runnable", _ == 24, _ => true),
- Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "create", _ == 24, _ => true),
- // all generated methods like scaladsl.Sink$.akka$stream$scaladsl$Sink$$newOnCompleteStage$1
- Ignore(_ => true, _.contains("$"), _ => true, _ => true))
+ // private scaladsl method
+ Ignore(
+ _ == akka.stream.scaladsl.Source.getClass,
+ _ == "apply",
+ _ == 1,
+ _ == List(classOf[akka.stream.impl.SourceModule[_, _]])),
+ // corresponding matches on java side would need to have Function23
+ Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "apply", _ == 24, _ => true),
+ Ignore(_ == akka.stream.scaladsl.Flow.getClass, _ == "apply", _ == 24, _ => true),
+ Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "apply", _ == 24, _ => true),
+ Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "collection", _ => true, _ => true),
+ Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl
+ Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRefWithAck", _ => true, _ => true), // Internal in scaladsl
+ Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl
+ Ignore(_ == akka.stream.scaladsl.BidiFlow.getClass, _ == "apply", _ == 24, _ => true),
+ Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "runnable", _ == 24, _ => true),
+ Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "create", _ == 24, _ => true),
+ // all generated methods like scaladsl.Sink$.akka$stream$scaladsl$Sink$$newOnCompleteStage$1
+ Ignore(_ => true, _.contains("$"), _ => true, _ => true))
ignores.foldLeft(false) {
case (acc, i) =>
@@ -169,8 +169,9 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers {
*/
private val curryLikeJava: PartialFunction[Method, Method] = {
case m if m.parameterTypes.size > 1 =>
- m.copy(name = m.name.filter(Character.isLetter),
- parameterTypes = m.parameterTypes.dropRight(1) :+ classOf[akka.japi.function.Function[_, _]])
+ m.copy(
+ name = m.name.filter(Character.isLetter),
+ parameterTypes = m.parameterTypes.dropRight(1) :+ classOf[akka.japi.function.Function[_, _]])
case m => m
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala
index 7ddee57f78..586de5a319 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala
@@ -128,8 +128,9 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S
setHandler(shape.in, EagerTerminateInput)
setHandler(shape.out, EagerTerminateOutput)
override def preStart(): Unit =
- readN(shape.in, n)(_ => failStage(new IllegalStateException("Shouldn't happen!")),
- e => emitMultiple(shape.out, e.iterator, () => completeStage()))
+ readN(shape.in, n)(
+ _ => failStage(new IllegalStateException("Shouldn't happen!")),
+ e => emitMultiple(shape.out, e.iterator, () => completeStage()))
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala
index 856e665ba7..4b87b48fad 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala
@@ -572,8 +572,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(sink -> (Attributes.name("test") and Attributes.name("testSink")),
- source -> (Attributes.name("test") and Attributes.name("testSource"))))
+ List(
+ sink -> (Attributes.name("test") and Attributes.name("testSink")),
+ source -> (Attributes.name("test") and Attributes.name("testSource"))))
}
"properly accumulate attributes in chain" in {
@@ -585,8 +586,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(sink -> (Attributes.name("test") and Attributes.name("testSink")),
- source -> (Attributes.name("test") and Attributes.name("source"))))
+ List(
+ sink -> (Attributes.name("test") and Attributes.name("testSink")),
+ source -> (Attributes.name("test") and Attributes.name("source"))))
}
"overwrite last attributes until a new module is added" in {
@@ -600,8 +602,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(sink -> (Attributes.name("test2") and Attributes.name("testSink")),
- source -> (Attributes.name("test2") and Attributes.name("source2"))))
+ List(
+ sink -> (Attributes.name("test2") and Attributes.name("testSink")),
+ source -> (Attributes.name("test2") and Attributes.name("source2"))))
}
"propagate attributes to embedded linear sink and source" in {
@@ -614,8 +617,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(sink -> (Attributes.name("test") and Attributes.name("sink")),
- source -> (Attributes.name("test") and Attributes.name("source"))))
+ List(
+ sink -> (Attributes.name("test") and Attributes.name("sink")),
+ source -> (Attributes.name("test") and Attributes.name("source"))))
}
"propagate attributes to embedded linear flow" in {
@@ -630,9 +634,10 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(sink -> (Attributes.name("test") and Attributes.name("sink")),
- flow1 -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("flow")),
- source -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("source"))))
+ List(
+ sink -> (Attributes.name("test") and Attributes.name("sink")),
+ flow1 -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("flow")),
+ source -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("source"))))
}
"propagate attributes to embedded composite sink" in {
@@ -646,18 +651,20 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(compositeSink -> (Attributes.name("test") and Attributes.name("sink")),
- flow1 -> (Attributes.name("test") and Attributes.name("flow")),
- source -> (Attributes.name("test") and Attributes.name("source"))))
+ List(
+ compositeSink -> (Attributes.name("test") and Attributes.name("sink")),
+ flow1 -> (Attributes.name("test") and Attributes.name("flow")),
+ source -> (Attributes.name("test") and Attributes.name("source"))))
}
"propagate attributes to embedded composite source" in {
val builder =
LinearTraversalBuilder
.empty()
- .append(compositeSource.traversalBuilder.setAttributes(Attributes.name("source")),
- compositeSource.shape,
- Keep.left)
+ .append(
+ compositeSource.traversalBuilder.setAttributes(Attributes.name("source")),
+ compositeSource.shape,
+ Keep.left)
.setAttributes(Attributes.name("source-outer"))
.append(flow1.traversalBuilder.setAttributes(Attributes.name("flow")), flow1.shape, Keep.left)
.append(sink.traversalBuilder.setAttributes(Attributes.name("sink")), compositeSink.shape, Keep.left)
@@ -666,28 +673,31 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(sink -> (Attributes.name("test") and Attributes.name("sink")),
- flow1 -> (Attributes.name("test") and Attributes.name("flow")),
- compositeSource -> (Attributes.name("test") and Attributes.name("source-outer") and Attributes.name(
- "source"))))
+ List(
+ sink -> (Attributes.name("test") and Attributes.name("sink")),
+ flow1 -> (Attributes.name("test") and Attributes.name("flow")),
+ compositeSource -> (Attributes.name("test") and Attributes.name("source-outer") and Attributes.name(
+ "source"))))
}
"propagate attributes to embedded composite flow" in {
val builder =
source.traversalBuilder
.setAttributes(Attributes.name("source"))
- .append(compositeFlow1.traversalBuilder.setAttributes(Attributes.name("flow")),
- compositeFlow1.shape,
- Keep.left)
+ .append(
+ compositeFlow1.traversalBuilder.setAttributes(Attributes.name("flow")),
+ compositeFlow1.shape,
+ Keep.left)
.append(sink.traversalBuilder.setAttributes(Attributes.name("sink")), compositeSink.shape, Keep.left)
.setAttributes(Attributes.name("test"))
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(sink -> (Attributes.name("test") and Attributes.name("sink")),
- compositeFlow1 -> (Attributes.name("test") and Attributes.name("flow")),
- source -> (Attributes.name("test") and Attributes.name("source"))))
+ List(
+ sink -> (Attributes.name("test") and Attributes.name("sink")),
+ compositeFlow1 -> (Attributes.name("test") and Attributes.name("flow")),
+ source -> (Attributes.name("test") and Attributes.name("source"))))
}
"properly append a Source to empty linear" in {
@@ -796,10 +806,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.islandAssignments should ===(
- List((sink, Attributes.none, TestDefaultIsland),
- (flow2, Attributes.none, TestDefaultIsland),
- (flow1, Attributes.name("island2"), TestIsland2),
- (source, Attributes.name("island2") and Attributes.name("island1"), TestIsland1)))
+ List(
+ (sink, Attributes.none, TestDefaultIsland),
+ (flow2, Attributes.none, TestDefaultIsland),
+ (flow1, Attributes.name("island2"), TestIsland2),
+ (source, Attributes.name("island2") and Attributes.name("island1"), TestIsland1)))
}
"properly nest flow with islands" in {
@@ -819,10 +830,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.islandAssignments should ===(
- List((sink, Attributes.none, TestDefaultIsland),
- (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
- (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
- (source, Attributes.none, TestDefaultIsland)))
+ List(
+ (sink, Attributes.none, TestDefaultIsland),
+ (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
+ (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
+ (source, Attributes.none, TestDefaultIsland)))
}
"properly nest flow with island inside another island" in {
@@ -843,10 +855,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.islandAssignments should ===(
- List((sink, Attributes.none, TestDefaultIsland),
- (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
- (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
- (source, Attributes.name("wholeThing"), TestIsland2)))
+ List(
+ (sink, Attributes.none, TestDefaultIsland),
+ (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
+ (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
+ (source, Attributes.name("wholeThing"), TestIsland2)))
}
"properly nest flow with islands starting from linear enclosing a composite" in {
@@ -864,10 +877,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.islandAssignments should ===(
- List((sink, Attributes.none, TestDefaultIsland),
- (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
- (compositeFlow1, Attributes.name("wholeThing"), TestIsland2),
- (source, Attributes.name("wholeThing"), TestIsland2)))
+ List(
+ (sink, Attributes.none, TestDefaultIsland),
+ (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
+ (compositeFlow1, Attributes.name("wholeThing"), TestIsland2),
+ (source, Attributes.name("wholeThing"), TestIsland2)))
}
"properly nest flow containing composite with islands" in {
@@ -886,10 +900,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.islandAssignments should ===(
- List((sink, Attributes.none, TestDefaultIsland),
- (compositeFlow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
- (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
- (source, Attributes.none, TestDefaultIsland)))
+ List(
+ (sink, Attributes.none, TestDefaultIsland),
+ (compositeFlow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
+ (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1),
+ (source, Attributes.none, TestDefaultIsland)))
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala
index cfc897719f..aa617791f3 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala
@@ -57,12 +57,13 @@ class TimeoutsSpec extends StreamSpec {
"pass through error unmodified" in assertAllStagesStopped {
a[TE] shouldBe thrownBy {
- Await.result(Source(1 to 100)
- .concat(Source.failed(TE("test")))
- .completionTimeout(2.seconds)
- .grouped(200)
- .runWith(Sink.head),
- 3.seconds)
+ Await.result(
+ Source(1 to 100)
+ .concat(Source.failed(TE("test")))
+ .completionTimeout(2.seconds)
+ .grouped(200)
+ .runWith(Sink.head),
+ 3.seconds)
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala
index 3ca897bf7e..2e956dedec 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala
@@ -352,8 +352,9 @@ class TraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(source -> (Attributes.name("test") and Attributes.name("testSource")),
- sink -> (Attributes.name("test") and Attributes.name("testSink"))))
+ List(
+ source -> (Attributes.name("test") and Attributes.name("testSource")),
+ sink -> (Attributes.name("test") and Attributes.name("testSink"))))
}
"overwrite last attributes until embedded in other builder" in {
@@ -373,8 +374,9 @@ class TraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(source -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSource")),
- sink -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSinkB"))))
+ List(
+ source -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSource")),
+ sink -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSinkB"))))
}
"propagate attributes to embedded flow" in {
@@ -391,9 +393,10 @@ class TraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.attributesAssignments should ===(
- List(source -> (Attributes.name("test") and Attributes.name("testSource")),
- flow1 -> (Attributes.name("test") and Attributes.name("flow")),
- sink -> (Attributes.name("test") and Attributes.name("testSink"))))
+ List(
+ source -> (Attributes.name("test") and Attributes.name("testSource")),
+ flow1 -> (Attributes.name("test") and Attributes.name("flow")),
+ sink -> (Attributes.name("test") and Attributes.name("testSink"))))
}
"properly track embedded island and its attributes" in {
@@ -410,9 +413,10 @@ class TraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.islandAssignments should ===(
- List((source, Attributes.none, TestDefaultIsland),
- (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1),
- (sink, Attributes.none, TestDefaultIsland)))
+ List(
+ (source, Attributes.none, TestDefaultIsland),
+ (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1),
+ (sink, Attributes.none, TestDefaultIsland)))
}
"properly ignore redundant island assignment" in {
@@ -429,9 +433,10 @@ class TraversalBuilderSpec extends AkkaSpec {
val mat = testMaterialize(builder)
mat.islandAssignments should ===(
- List((source, Attributes.none, TestDefaultIsland),
- (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1),
- (sink, Attributes.none, TestDefaultIsland)))
+ List(
+ (source, Attributes.none, TestDefaultIsland),
+ (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1),
+ (sink, Attributes.none, TestDefaultIsland)))
}
//TODO: Dummy test cases just for smoke-testing. Should be removed.
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala
index 06552c6270..a3e2467a00 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala
@@ -68,12 +68,13 @@ object TraversalTestUtils {
override def toString = s"TestFlow$tag"
}
- class MaterializationResult(val connections: Int,
- val inlets: Array[InPort],
- val outlets: Array[OutPort],
- val matValue: Any,
- val attributesAssignments: List[(AtomicModule[Shape, Any], Attributes)],
- val islandAssignments: List[(AtomicModule[Shape, Any], Attributes, IslandTag)]) {
+ class MaterializationResult(
+ val connections: Int,
+ val inlets: Array[InPort],
+ val outlets: Array[OutPort],
+ val matValue: Any,
+ val attributesAssignments: List[(AtomicModule[Shape, Any], Attributes)],
+ val islandAssignments: List[(AtomicModule[Shape, Any], Attributes, IslandTag)]) {
override def toString = {
outlets.iterator.zip(inlets.iterator).mkString("connections: ", ", ", "")
@@ -173,12 +174,13 @@ object TraversalTestUtils {
}
}
- new MaterializationResult(connections,
- inlets,
- outlets,
- matValueStack.peekLast(),
- attributesResult.reverse,
- islandsResult.reverse)
+ new MaterializationResult(
+ connections,
+ inlets,
+ outlets,
+ matValueStack.peekLast(),
+ attributesResult.reverse,
+ islandsResult.reverse)
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala
index 3868ef62bd..6bc2deefe7 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala
@@ -36,8 +36,9 @@ class ActorGraphInterpreterSpec extends StreamSpec {
"be able to reuse a simple identity graph stage" in assertAllStagesStopped {
val identity = GraphStages.identity[Int]
- Await.result(Source(1 to 100).via(identity).via(identity).via(identity).grouped(200).runWith(Sink.head),
- 3.seconds) should ===(1 to 100)
+ Await.result(
+ Source(1 to 100).via(identity).via(identity).via(identity).grouped(200).runWith(Sink.head),
+ 3.seconds) should ===(1 to 100)
}
"be able to interpret a simple bidi stage" in assertAllStagesStopped {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala
index 0b41071cbf..07e70f843e 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala
@@ -160,9 +160,9 @@ class AsyncCallbackSpec extends AkkaSpec {
val callback = Source
.fromPublisher(in)
.viaMat(
- new AsyncCallbackGraphStage(probe.ref,
- Some(asyncCb =>
- earlyFeedback.completeWith(asyncCb.invokeWithFeedback("early")))))(Keep.right)
+ new AsyncCallbackGraphStage(
+ probe.ref,
+ Some(asyncCb => earlyFeedback.completeWith(asyncCb.invokeWithFeedback("early")))))(Keep.right)
.to(Sink.ignore)
.run()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala
index 34329c4824..006de4ae45 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala
@@ -31,10 +31,11 @@ object GraphInterpreterSpecKit {
* @param attributes Optional set of attributes to pass to the stages when creating the logics
* @return Created logics and the maps of all inlets respective outlets to those logics
*/
- private[stream] def createLogics(stages: Array[GraphStageWithMaterializedValue[_ <: Shape, _]],
- upstreams: Array[UpstreamBoundaryStageLogic[_]],
- downstreams: Array[DownstreamBoundaryStageLogic[_]],
- attributes: Array[Attributes] = Array.empty)
+ private[stream] def createLogics(
+ stages: Array[GraphStageWithMaterializedValue[_ <: Shape, _]],
+ upstreams: Array[UpstreamBoundaryStageLogic[_]],
+ downstreams: Array[DownstreamBoundaryStageLogic[_]],
+ attributes: Array[Attributes] = Array.empty)
: (Array[GraphStageLogic], SMap[Inlet[_], GraphStageLogic], SMap[Outlet[_], GraphStageLogic]) = {
if (attributes.nonEmpty && attributes.length != stages.length)
throw new IllegalArgumentException("Attributes must be either empty or one per stage")
@@ -115,11 +116,12 @@ object GraphInterpreterSpecKit {
val outOwner = window(0)
val inOwner = window(1)
- val connection = new Connection(id = idx,
- outOwner = outOwner,
- outHandler = outOwner.outHandler(0),
- inOwner = inOwner,
- inHandler = inOwner.inHandler(0))
+ val connection = new Connection(
+ id = idx,
+ outOwner = outOwner,
+ outHandler = outOwner.outHandler(0),
+ inOwner = inOwner,
+ inHandler = inOwner.inHandler(0))
outOwner.portToConn(outOwner.inCount) = connection
inOwner.portToConn(0) = connection
@@ -132,10 +134,11 @@ object GraphInterpreterSpecKit {
/**
* Create interpreter connections for all the given `connectedPorts`.
*/
- private[stream] def createConnections(logics: Seq[GraphStageLogic],
- connectedPorts: Seq[(Outlet[_], Inlet[_])],
- inOwners: SMap[Inlet[_], GraphStageLogic],
- outOwners: SMap[Outlet[_], GraphStageLogic]): Array[Connection] = {
+ private[stream] def createConnections(
+ logics: Seq[GraphStageLogic],
+ connectedPorts: Seq[(Outlet[_], Inlet[_])],
+ inOwners: SMap[Inlet[_], GraphStageLogic],
+ outOwners: SMap[Outlet[_], GraphStageLogic]): Array[Connection] = {
val connections = new Array[Connection](connectedPorts.size)
connectedPorts.zipWithIndex.foreach {
@@ -143,11 +146,12 @@ object GraphInterpreterSpecKit {
val outOwner = outOwners(outlet)
val inOwner = inOwners(inlet)
- val connection = new Connection(id = idx,
- outOwner = outOwner,
- outHandler = outOwner.outHandler(outlet.id),
- inOwner = inOwner,
- inHandler = inOwner.inHandler(inlet.id))
+ val connection = new Connection(
+ id = idx,
+ outOwner = outOwner,
+ outHandler = outOwner.outHandler(outlet.id),
+ inOwner = inOwner,
+ inHandler = inOwner.inHandler(inlet.id))
connections(idx) = connection
inOwner.portToConn(inlet.id) = connection
@@ -249,13 +253,14 @@ trait GraphInterpreterSpecKit extends StreamSpec {
}
def manualInit(logics: Array[GraphStageLogic], connections: Array[Connection]): Unit = {
- _interpreter = new GraphInterpreter(NoMaterializer,
- logger,
- logics,
- connections,
- onAsyncInput = (_, _, _, _) => (),
- fuzzingMode = false,
- context = null)
+ _interpreter = new GraphInterpreter(
+ NoMaterializer,
+ logger,
+ logics,
+ connections,
+ onAsyncInput = (_, _, _, _) => (),
+ fuzzingMode = false,
+ context = null)
_interpreter.init(null)
}
@@ -532,15 +537,16 @@ trait GraphInterpreterSpecKit extends StreamSpec {
val out = Outlet[TT]("out")
out.id = 0
- setHandler(out,
- new OutHandler {
- override def onPull(): Unit = {
- if (lastEvent.contains(RequestOne)) lastEvent += RequestAnother
- else lastEvent += RequestOne
- }
+ setHandler(
+ out,
+ new OutHandler {
+ override def onPull(): Unit = {
+ if (lastEvent.contains(RequestOne)) lastEvent += RequestAnother
+ else lastEvent += RequestOne
+ }
- override def onDownstreamFinish(): Unit = lastEvent += Cancel
- })
+ override def onDownstreamFinish(): Unit = lastEvent += Cancel
+ })
def onNext(elem: TT): Unit = {
push(out, elem)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala
index fa2b49de4d..f4cab0ecc4 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala
@@ -43,9 +43,10 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
lastEvents() should be(Set(OnComplete))
}
- "implement chain of maps correctly" in new OneBoundedSetup[Int](Map((x: Int) => x + 1),
- Map((x: Int) => x * 2),
- Map((x: Int) => x + 1)) {
+ "implement chain of maps correctly" in new OneBoundedSetup[Int](
+ Map((x: Int) => x + 1),
+ Map((x: Int) => x * 2),
+ Map((x: Int) => x + 1)) {
lastEvents() should be(Set.empty)
@@ -78,8 +79,9 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
lastEvents() should be(Set(OnComplete))
}
- "implement one-to-many many-to-one chain correctly" in new OneBoundedSetup[Int](Doubler(),
- Filter((x: Int) => x != 0)) {
+ "implement one-to-many many-to-one chain correctly" in new OneBoundedSetup[Int](
+ Doubler(),
+ Filter((x: Int) => x != 0)) {
lastEvents() should be(Set.empty)
@@ -102,8 +104,9 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
lastEvents() should be(Set(OnComplete))
}
- "implement many-to-one one-to-many chain correctly" in new OneBoundedSetup[Int](Filter((x: Int) => x != 0),
- Doubler()) {
+ "implement many-to-one one-to-many chain correctly" in new OneBoundedSetup[Int](
+ Filter((x: Int) => x != 0),
+ Doubler()) {
lastEvents() should be(Set.empty)
@@ -143,9 +146,10 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
lastEvents() should be(Set(OnNext(1), Cancel, OnComplete))
}
- "implement take inside a chain" in new OneBoundedSetup[Int](Filter((x: Int) => x != 0),
- takeTwo,
- Map((x: Int) => x + 1)) {
+ "implement take inside a chain" in new OneBoundedSetup[Int](
+ Filter((x: Int) => x != 0),
+ takeTwo,
+ Map((x: Int) => x + 1)) {
lastEvents() should be(Set.empty)
@@ -329,8 +333,9 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
}
- "work with expand-expand" in new OneBoundedSetup[Int](new Expand((x: Int) => Iterator.from(x)),
- new Expand((x: Int) => Iterator.from(x))) {
+ "work with expand-expand" in new OneBoundedSetup[Int](
+ new Expand((x: Int) => Iterator.from(x)),
+ new Expand((x: Int) => Iterator.from(x))) {
lastEvents() should be(Set(RequestOne))
@@ -409,11 +414,12 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
}
// Note, the new interpreter has no jumpback table, still did not want to remove the test
- "work with jumpback table and completed elements" in new OneBoundedSetup[Int](Map((x: Int) => x),
- Map((x: Int) => x),
- KeepGoing(),
- Map((x: Int) => x),
- Map((x: Int) => x)) {
+ "work with jumpback table and completed elements" in new OneBoundedSetup[Int](
+ Map((x: Int) => x),
+ Map((x: Int) => x),
+ KeepGoing(),
+ Map((x: Int) => x),
+ Map((x: Int) => x)) {
lastEvents() should be(Set.empty)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala
index 229596e9a1..b2f963d05a 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala
@@ -52,8 +52,9 @@ class InterpreterSupervisionSpec extends StreamSpec with GraphInterpreterSpecKit
lastEvents() should be(Set(Cancel, OnError(TE)))
}
- "resume when Map throws" in new OneBoundedSetupWithDecider[Int](Supervision.resumingDecider,
- Map((x: Int) => if (x == 0) throw TE else x)) {
+ "resume when Map throws" in new OneBoundedSetupWithDecider[Int](
+ Supervision.resumingDecider,
+ Map((x: Int) => if (x == 0) throw TE else x)) {
downstream.requestOne()
lastEvents() should be(Set(RequestOne))
upstream.onNext(2)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala
index e906a57ca9..ac27519ce3 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala
@@ -28,12 +28,15 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
}
"call postStop in order on stages - when upstream completes" in new OneBoundedSetup[String](
- PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-a",
- onStop = () => testActor ! "stop-a"),
- PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-b",
- onStop = () => testActor ! "stop-b"),
- PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-c",
- onStop = () => testActor ! "stop-c")) {
+ PreStartAndPostStopIdentity(
+ onUpstreamCompleted = () => testActor ! "complete-a",
+ onStop = () => testActor ! "stop-a"),
+ PreStartAndPostStopIdentity(
+ onUpstreamCompleted = () => testActor ! "complete-b",
+ onStop = () => testActor ! "stop-b"),
+ PreStartAndPostStopIdentity(
+ onUpstreamCompleted = () => testActor ! "complete-c",
+ onStop = () => testActor ! "stop-c")) {
upstream.onComplete()
expectMsg("complete-a")
expectMsg("stop-a")
@@ -45,8 +48,9 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
}
"call postStop in order on stages - when upstream onErrors" in new OneBoundedSetup[String](
- PreStartAndPostStopIdentity(onUpstreamFailed = ex => testActor ! ex.getMessage,
- onStop = () => testActor ! "stop-c")) {
+ PreStartAndPostStopIdentity(
+ onUpstreamFailed = ex => testActor ! ex.getMessage,
+ onStop = () => testActor ! "stop-c")) {
val msg = "Boom! Boom! Boom!"
upstream.onError(TE(msg))
expectMsg(msg)
@@ -83,10 +87,10 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
lastEvents() should ===(Set(OnComplete))
}
- "onError when preStart fails with stages after" in new OneBoundedSetup[String](Map((x: Int) => x),
- PreStartFailer(
- () => throw TE("Boom!")),
- Map((x: Int) => x)) {
+ "onError when preStart fails with stages after" in new OneBoundedSetup[String](
+ Map((x: Int) => x),
+ PreStartFailer(() => throw TE("Boom!")),
+ Map((x: Int) => x)) {
lastEvents() should ===(Set(Cancel, OnError(TE("Boom!"))))
}
@@ -139,10 +143,11 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
}
- private[akka] case class PreStartAndPostStopIdentity[T](onStart: () => Unit = () => (),
- onStop: () => Unit = () => (),
- onUpstreamCompleted: () => Unit = () => (),
- onUpstreamFailed: Throwable => Unit = ex => ())
+ private[akka] case class PreStartAndPostStopIdentity[T](
+ onStart: () => Unit = () => (),
+ onStop: () => Unit = () => (),
+ onUpstreamCompleted: () => Unit = () => (),
+ onUpstreamFailed: Throwable => Unit = ex => ())
extends SimpleLinearGraphStage[T] {
override def createLogic(attributes: Attributes): GraphStageLogic =
diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala
index 65c96e1b6a..50058f5489 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala
@@ -223,10 +223,11 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
r.futureValue.status.isFailure shouldBe true
}
- List(Settings(chunkSize = 512, readAhead = 2),
- Settings(chunkSize = 512, readAhead = 4),
- Settings(chunkSize = 2048, readAhead = 2),
- Settings(chunkSize = 2048, readAhead = 4)).foreach { settings =>
+ List(
+ Settings(chunkSize = 512, readAhead = 2),
+ Settings(chunkSize = 512, readAhead = 4),
+ Settings(chunkSize = 2048, readAhead = 2),
+ Settings(chunkSize = 2048, readAhead = 4)).foreach { settings =>
import settings._
s"count lines in real file (chunkSize = $chunkSize, readAhead = $readAhead)" in {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala
index 8190cd4441..e4d203ce80 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala
@@ -57,20 +57,21 @@ class InputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"emit as soon as read" in assertAllStagesStopped {
val latch = new CountDownLatch(1)
val probe = StreamConverters
- .fromInputStream(() =>
- new InputStream {
- @volatile var emitted = false
- override def read(): Int = {
- if (!emitted) {
- emitted = true
- 'M'.toInt
- } else {
- latch.await()
- -1
- }
- }
- },
- chunkSize = 1)
+ .fromInputStream(
+ () =>
+ new InputStream {
+ @volatile var emitted = false
+ override def read(): Int = {
+ if (!emitted) {
+ emitted = true
+ 'M'.toInt
+ } else {
+ latch.await()
+ -1
+ }
+ }
+ },
+ chunkSize = 1)
.runWith(TestSink.probe)
probe.request(4)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala
index 26e5d2b41c..2321a0feb5 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala
@@ -98,8 +98,9 @@ class TcpSpec extends StreamSpec("""
val tcpWriteProbe = new TcpWriteProbe()
val future = Source
.fromPublisher(tcpWriteProbe.publisherProbe)
- .viaMat(Tcp().outgoingConnection(InetSocketAddress.createUnresolved("example.com", 666),
- connectTimeout = 1.second))(Keep.right)
+ .viaMat(
+ Tcp().outgoingConnection(InetSocketAddress.createUnresolved("example.com", 666), connectTimeout = 1.second))(
+ Keep.right)
.toMat(Sink.ignore)(Keep.left)
.run()
@@ -454,8 +455,9 @@ class TcpSpec extends StreamSpec("""
}
"handle when connection actor terminates unexpectedly" in {
- val system2 = ActorSystem("TcpSpec-unexpected-system2",
- ConfigFactory.parseString("""
+ val system2 = ActorSystem(
+ "TcpSpec-unexpected-system2",
+ ConfigFactory.parseString("""
akka.loglevel = DEBUG # issue #21660
""").withFallback(system.settings.config))
@@ -783,13 +785,13 @@ class TcpSpec extends StreamSpec("""
Tcp()
.bindAndHandleTls(
- // just echo charactes until we reach '\n', then complete stream
- // also - byte is our framing
- Flow[ByteString].mapConcat(_.utf8String.toList).takeWhile(_ != '\n').map(c => ByteString(c)),
- address.getHostName,
- address.getPort,
- sslContext,
- firstSession)
+ // just echo characters until we reach '\n', then complete stream
+ // also - byte is our framing
+ Flow[ByteString].mapConcat(_.utf8String.toList).takeWhile(_ != '\n').map(c => ByteString(c)),
+ address.getHostName,
+ address.getPort,
+ sslContext,
+ firstSession)
.futureValue
system.log.info(s"Server bound to ${address.getHostString}:${address.getPort}")
@@ -862,10 +864,11 @@ class TcpSpec extends StreamSpec("""
}
- def validateServerClientCommunication(testData: ByteString,
- serverConnection: ServerConnection,
- readProbe: TcpReadProbe,
- writeProbe: TcpWriteProbe): Unit = {
+ def validateServerClientCommunication(
+ testData: ByteString,
+ serverConnection: ServerConnection,
+ readProbe: TcpReadProbe,
+ writeProbe: TcpWriteProbe): Unit = {
serverConnection.write(testData)
serverConnection.read(5)
readProbe.read(5) should be(testData)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala
index c649013fa3..7870c24b90 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala
@@ -128,23 +128,26 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing
}
trait CommunicationSetup extends Named {
- def decorateFlow(leftClosing: TLSClosing,
- rightClosing: TLSClosing,
- rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]): Flow[SslTlsOutbound, SslTlsInbound, NotUsed]
+ def decorateFlow(
+ leftClosing: TLSClosing,
+ rightClosing: TLSClosing,
+ rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]): Flow[SslTlsOutbound, SslTlsInbound, NotUsed]
def cleanup(): Unit = ()
}
object ClientInitiates extends CommunicationSetup {
- def decorateFlow(leftClosing: TLSClosing,
- rightClosing: TLSClosing,
- rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) =
+ def decorateFlow(
+ leftClosing: TLSClosing,
+ rightClosing: TLSClosing,
+ rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) =
clientTls(leftClosing).atop(serverTls(rightClosing).reversed).join(rhs)
}
object ServerInitiates extends CommunicationSetup {
- def decorateFlow(leftClosing: TLSClosing,
- rightClosing: TLSClosing,
- rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) =
+ def decorateFlow(
+ leftClosing: TLSClosing,
+ rightClosing: TLSClosing,
+ rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) =
serverTls(leftClosing).atop(clientTls(rightClosing).reversed).join(rhs)
}
@@ -155,9 +158,10 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing
object ClientInitiatesViaTcp extends CommunicationSetup {
var binding: Tcp.ServerBinding = null
- def decorateFlow(leftClosing: TLSClosing,
- rightClosing: TLSClosing,
- rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = {
+ def decorateFlow(
+ leftClosing: TLSClosing,
+ rightClosing: TLSClosing,
+ rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = {
binding = server(serverTls(rightClosing).reversed.join(rhs))
clientTls(leftClosing).join(Tcp().outgoingConnection(binding.localAddress))
}
@@ -166,9 +170,10 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing
object ServerInitiatesViaTcp extends CommunicationSetup {
var binding: Tcp.ServerBinding = null
- def decorateFlow(leftClosing: TLSClosing,
- rightClosing: TLSClosing,
- rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = {
+ def decorateFlow(
+ leftClosing: TLSClosing,
+ rightClosing: TLSClosing,
+ rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = {
binding = server(clientTls(rightClosing).reversed.join(rhs))
serverTls(leftClosing).join(Tcp().outgoingConnection(binding.localAddress))
}
@@ -333,17 +338,18 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing
}
val scenarios =
- Seq(SingleBytes,
- MediumMessages,
- LargeMessages,
- EmptyBytesFirst,
- EmptyBytesInTheMiddle,
- EmptyBytesLast,
- CancellingRHS,
- SessionRenegotiationBySender,
- SessionRenegotiationByReceiver,
- SessionRenegotiationFirstOne,
- SessionRenegotiationFirstTwo)
+ Seq(
+ SingleBytes,
+ MediumMessages,
+ LargeMessages,
+ EmptyBytesFirst,
+ EmptyBytesInTheMiddle,
+ EmptyBytesLast,
+ CancellingRHS,
+ SessionRenegotiationBySender,
+ SessionRenegotiationByReceiver,
+ SessionRenegotiationFirstOne,
+ SessionRenegotiationFirstTwo)
for {
commPattern <- communicationPatterns
diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala
index a5a99f8223..e5b4fe2c9b 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala
@@ -24,8 +24,9 @@ object CompressionTestingTools {
future.value.get match {
case Success(t) => t
case Failure(ex) =>
- throw new RuntimeException("Trying to await result of failed Future, see the cause for the original problem.",
- ex)
+ throw new RuntimeException(
+ "Trying to await result of failed Future, see the cause for the original problem.",
+ ex)
}
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala
index 4a15f3f817..2ed5deea20 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala
@@ -19,11 +19,13 @@ class BidiFlowSpec extends StreamSpec {
implicit val materializer = ActorMaterializer()
- val bidi = BidiFlow.fromFlows(Flow[Int].map(x => x.toLong + 2).withAttributes(name("top")),
- Flow[ByteString].map(_.decodeString("UTF-8")).withAttributes(name("bottom")))
+ val bidi = BidiFlow.fromFlows(
+ Flow[Int].map(x => x.toLong + 2).withAttributes(name("top")),
+ Flow[ByteString].map(_.decodeString("UTF-8")).withAttributes(name("bottom")))
- val inverse = BidiFlow.fromFlows(Flow[Long].map(x => x.toInt + 2).withAttributes(name("top")),
- Flow[String].map(ByteString(_)).withAttributes(name("bottom")))
+ val inverse = BidiFlow.fromFlows(
+ Flow[Long].map(x => x.toInt + 2).withAttributes(name("top")),
+ Flow[String].map(ByteString(_)).withAttributes(name("bottom")))
val bidiMat = BidiFlow.fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b => s =>
Source.single(42) ~> s
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala
index 613a91a76a..8adab4d634 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala
@@ -85,8 +85,9 @@ class FlowAskSpec extends StreamSpec {
val dontReply = system.actorOf(TestActors.blackholeProps.withDispatcher("akka.test.stream-dispatcher"), "dontReply")
val replyRandomDelays =
- system.actorOf(Props(classOf[RandomDelaysReplier]).withDispatcher("akka.test.stream-dispatcher"),
- "replyRandomDelays")
+ system.actorOf(
+ Props(classOf[RandomDelaysReplier]).withDispatcher("akka.test.stream-dispatcher"),
+ "replyRandomDelays")
val statusReplier =
system.actorOf(Props(new StatusReplier).withDispatcher("akka.test.stream-dispatcher"), "statusReplier")
@@ -234,11 +235,12 @@ class FlowAskSpec extends StreamSpec {
}
"resume after multiple failures" in assertAllStagesStopped {
- Await.result(Source(1 to 6)
- .ask[Reply](2)(failAllExcept6)
- .withAttributes(supervisionStrategy(resumingDecider))
- .runWith(Sink.head),
- 3.seconds) should ===(Reply(6))
+ Await.result(
+ Source(1 to 6)
+ .ask[Reply](2)(failAllExcept6)
+ .withAttributes(supervisionStrategy(resumingDecider))
+ .runWith(Sink.head),
+ 3.seconds) should ===(Reply(6))
}
"should handle cancel properly" in assertAllStagesStopped {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala
index ae62add410..0b1e16c2f2 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala
@@ -92,30 +92,33 @@ class FlowDelaySpec extends StreamSpec {
}
"drop tail for internal buffer if it's full in DropTail mode" in assertAllStagesStopped {
- Await.result(Source(1 to 20)
- .delay(1.seconds, DelayOverflowStrategy.dropTail)
- .withAttributes(inputBuffer(16, 16))
- .grouped(100)
- .runWith(Sink.head),
- 1200.millis) should ===((1 to 15).toList :+ 20)
+ Await.result(
+ Source(1 to 20)
+ .delay(1.seconds, DelayOverflowStrategy.dropTail)
+ .withAttributes(inputBuffer(16, 16))
+ .grouped(100)
+ .runWith(Sink.head),
+ 1200.millis) should ===((1 to 15).toList :+ 20)
}
"drop head for internal buffer if it's full in DropHead mode" in assertAllStagesStopped {
- Await.result(Source(1 to 20)
- .delay(1.seconds, DelayOverflowStrategy.dropHead)
- .withAttributes(inputBuffer(16, 16))
- .grouped(100)
- .runWith(Sink.head),
- 1200.millis) should ===(5 to 20)
+ Await.result(
+ Source(1 to 20)
+ .delay(1.seconds, DelayOverflowStrategy.dropHead)
+ .withAttributes(inputBuffer(16, 16))
+ .grouped(100)
+ .runWith(Sink.head),
+ 1200.millis) should ===(5 to 20)
}
"clear all for internal buffer if it's full in DropBuffer mode" in assertAllStagesStopped {
- Await.result(Source(1 to 20)
- .delay(1.seconds, DelayOverflowStrategy.dropBuffer)
- .withAttributes(inputBuffer(16, 16))
- .grouped(100)
- .runWith(Sink.head),
- 1200.millis) should ===(17 to 20)
+ Await.result(
+ Source(1 to 20)
+ .delay(1.seconds, DelayOverflowStrategy.dropBuffer)
+ .withAttributes(inputBuffer(16, 16))
+ .grouped(100)
+ .runWith(Sink.head),
+ 1200.millis) should ===(17 to 20)
}
"pass elements with delay through normally in backpressured mode" in assertAllStagesStopped {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala
index 7e3d4bc77e..4c40d22127 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala
@@ -13,8 +13,9 @@ class FlowDispatcherSpec extends StreamSpec(s"my-dispatcher = $${akka.test.strea
val defaultSettings = ActorMaterializerSettings(system)
- def testDispatcher(settings: ActorMaterializerSettings = defaultSettings,
- dispatcher: String = "akka.test.stream-dispatcher") = {
+ def testDispatcher(
+ settings: ActorMaterializerSettings = defaultSettings,
+ dispatcher: String = "akka.test.stream-dispatcher") = {
implicit val materializer = ActorMaterializer(settings)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala
index 318248468d..bfdb122e19 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala
@@ -222,14 +222,15 @@ class FlowFlattenMergeSpec extends StreamSpec {
"work with mix of Source.single and other sources when slow demand" in assertAllStagesStopped {
val sources: Source[Source[Int, NotUsed], NotUsed] = Source(
- List(Source.single(0),
- Source.single(1),
- Source(2 to 4),
- Source.single(5),
- Source(6 to 6),
- Source.single(7),
- Source(8 to 10),
- Source.single(11)))
+ List(
+ Source.single(0),
+ Source.single(1),
+ Source(2 to 4),
+ Source.single(5),
+ Source(6 to 6),
+ Source.single(7),
+ Source(8 to 10),
+ Source.single(11)))
val probe =
sources.flatMapConcat(identity).runWith(TestSink.probe)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala
index 9d6f303ac7..acd3056a71 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala
@@ -166,12 +166,13 @@ class FlowFoldAsyncSpec extends StreamSpec {
}
"resume after multiple failures" in assertAllStagesStopped {
- val futures: List[Future[String]] = List(Future.failed(Utils.TE("failure1")),
- Future.failed(Utils.TE("failure2")),
- Future.failed(Utils.TE("failure3")),
- Future.failed(Utils.TE("failure4")),
- Future.failed(Utils.TE("failure5")),
- Future.successful("happy!"))
+ val futures: List[Future[String]] = List(
+ Future.failed(Utils.TE("failure1")),
+ Future.failed(Utils.TE("failure2")),
+ Future.failed(Utils.TE("failure3")),
+ Future.failed(Utils.TE("failure4")),
+ Future.failed(Utils.TE("failure5")),
+ Future.successful("happy!"))
Source(futures)
.foldAsync("") { (_, s) =>
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala
index b33923efda..eff38b2c92 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala
@@ -26,8 +26,9 @@ class FlowIdleInjectSpec extends StreamSpec {
"emit elements periodically after silent periods" in assertAllStagesStopped {
val sourceWithIdleGap = Source(1 to 5) ++ Source(6 to 10).initialDelay(2.second)
- val result = Await.result(sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head),
- 3.seconds) should ===(List(1, 2, 3, 4, 5, 0, 0, 0, 6, 7, 8, 9, 10))
+ val result = Await.result(
+ sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head),
+ 3.seconds) should ===(List(1, 2, 3, 4, 5, 0, 0, 0, 6, 7, 8, 9, 10))
}
"immediately pull upstream" in {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala
index 5aa1b8454e..c97868c290 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala
@@ -122,9 +122,10 @@ class FlowLogSpec extends StreamSpec("""
}
"allow configuring log levels via Attributes" in {
- val logAttrs = Attributes.logLevels(onElement = Logging.WarningLevel,
- onFinish = Logging.InfoLevel,
- onFailure = Logging.DebugLevel)
+ val logAttrs = Attributes.logLevels(
+ onElement = Logging.WarningLevel,
+ onFinish = Logging.InfoLevel,
+ onFailure = Logging.DebugLevel)
Source
.single(42)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
index bde1750c82..868c62ada4 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
@@ -287,12 +287,13 @@ class FlowMapAsyncSpec extends StreamSpec {
}
"resume after multiple failures" in assertAllStagesStopped {
- val futures: List[Future[String]] = List(Future.failed(Utils.TE("failure1")),
- Future.failed(Utils.TE("failure2")),
- Future.failed(Utils.TE("failure3")),
- Future.failed(Utils.TE("failure4")),
- Future.failed(Utils.TE("failure5")),
- Future.successful("happy!"))
+ val futures: List[Future[String]] = List(
+ Future.failed(Utils.TE("failure1")),
+ Future.failed(Utils.TE("failure2")),
+ Future.failed(Utils.TE("failure3")),
+ Future.failed(Utils.TE("failure4")),
+ Future.failed(Utils.TE("failure5")),
+ Future.successful("happy!"))
Await.result(
Source(futures).mapAsync(2)(identity).withAttributes(supervisionStrategy(resumingDecider)).runWith(Sink.head),
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
index 3cdf3ed0c8..6730481e6c 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
@@ -169,18 +169,20 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec {
}
"resume after multiple failures" in assertAllStagesStopped {
- val futures: List[Future[String]] = List(Future.failed(Utils.TE("failure1")),
- Future.failed(Utils.TE("failure2")),
- Future.failed(Utils.TE("failure3")),
- Future.failed(Utils.TE("failure4")),
- Future.failed(Utils.TE("failure5")),
- Future.successful("happy!"))
+ val futures: List[Future[String]] = List(
+ Future.failed(Utils.TE("failure1")),
+ Future.failed(Utils.TE("failure2")),
+ Future.failed(Utils.TE("failure3")),
+ Future.failed(Utils.TE("failure4")),
+ Future.failed(Utils.TE("failure5")),
+ Future.successful("happy!"))
- Await.result(Source(futures)
- .mapAsyncUnordered(2)(identity)
- .withAttributes(supervisionStrategy(resumingDecider))
- .runWith(Sink.head),
- 3.seconds) should ===("happy!")
+ Await.result(
+ Source(futures)
+ .mapAsyncUnordered(2)(identity)
+ .withAttributes(supervisionStrategy(resumingDecider))
+ .runWith(Sink.head),
+ 3.seconds) should ===("happy!")
}
"finish after future failure" in assertAllStagesStopped {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
index bf7062c230..f42a786a46 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
@@ -18,12 +18,13 @@ class FlowMapConcatSpec extends StreamSpec with ScriptedTest {
"A MapConcat" must {
"map and concat" in {
- val script = Script(Seq(0) -> Seq(),
- Seq(1) -> Seq(1),
- Seq(2) -> Seq(2, 2),
- Seq(3) -> Seq(3, 3, 3),
- Seq(2) -> Seq(2, 2),
- Seq(1) -> Seq(1))
+ val script = Script(
+ Seq(0) -> Seq(),
+ Seq(1) -> Seq(1),
+ Seq(2) -> Seq(2, 2),
+ Seq(3) -> Seq(3, 3, 3),
+ Seq(2) -> Seq(2, 2),
+ Seq(1) -> Seq(1))
TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.mapConcat(x => (1 to x).map(_ => x))))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala
index 6fe3a3a742..1839244803 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala
@@ -206,10 +206,11 @@ class FlowScanAsyncSpec extends StreamSpec {
}
}
- def whenFailedScan(elements: immutable.Seq[Int],
- zero: Int,
- throwable: Throwable = new Exception("non fatal exception"),
- decider: Supervision.Decider = Supervision.stoppingDecider): Probe[Int] = {
+ def whenFailedScan(
+ elements: immutable.Seq[Int],
+ zero: Int,
+ throwable: Throwable = new Exception("non fatal exception"),
+ decider: Supervision.Decider = Supervision.stoppingDecider): Probe[Int] = {
val failedScanFlow = Flow[Int].scanAsync(zero) { (accumulator: Int, next: Int) =>
if (next >= 0) Future(accumulator + next)
else throw throwable
@@ -222,9 +223,10 @@ class FlowScanAsyncSpec extends StreamSpec {
.expectNext(zero)
}
- def whenEventualFuture(promises: immutable.Seq[Promise[Int]],
- zero: Int,
- decider: Supervision.Decider = Supervision.stoppingDecider)
+ def whenEventualFuture(
+ promises: immutable.Seq[Promise[Int]],
+ zero: Int,
+ decider: Supervision.Decider = Supervision.stoppingDecider)
: (TestPublisher.Probe[Int], TestSubscriber.Probe[Int]) = {
require(promises.nonEmpty, "must be at least one promise")
val promiseScanFlow = Flow[Int].scanAsync(zero) { (accumulator: Int, next: Int) =>
@@ -243,10 +245,11 @@ class FlowScanAsyncSpec extends StreamSpec {
(pub, sub)
}
- def whenFailedFuture(elements: immutable.Seq[Int],
- zero: Int,
- throwable: Throwable = new Exception("non fatal exception"),
- decider: Supervision.Decider = Supervision.stoppingDecider): Probe[Int] = {
+ def whenFailedFuture(
+ elements: immutable.Seq[Int],
+ zero: Int,
+ throwable: Throwable = new Exception("non fatal exception"),
+ decider: Supervision.Decider = Supervision.stoppingDecider): Probe[Int] = {
val failedFutureScanFlow = Flow[Int].scanAsync(zero) { (accumulator: Int, next: Int) =>
if (next >= 0) Future(accumulator + next)
else Future.failed(throwable)
@@ -259,9 +262,10 @@ class FlowScanAsyncSpec extends StreamSpec {
.expectNext(zero)
}
- def whenNullElement(elements: immutable.Seq[String],
- zero: String,
- decider: Supervision.Decider = Supervision.stoppingDecider): Probe[String] = {
+ def whenNullElement(
+ elements: immutable.Seq[String],
+ zero: String,
+ decider: Supervision.Decider = Supervision.stoppingDecider): Probe[String] = {
val nullFutureScanFlow: Flow[String, String, _] = Flow[String].scanAsync(zero) { (_: String, next: String) =>
if (next != "null") Future(next)
else Future(null)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala
index 7c4abe06c6..bc9cfba861 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala
@@ -483,9 +483,10 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re
}
"call future subscribers' onError should be called instead of onSubscribed after initial upstream reported an error" in {
- new ChainSetup[Int, String, NotUsed](_.map(_ => throw TestException),
- settings.withInputBuffer(initialSize = 1, maxSize = 1),
- toFanoutPublisher(1)) {
+ new ChainSetup[Int, String, NotUsed](
+ _.map(_ => throw TestException),
+ settings.withInputBuffer(initialSize = 1, maxSize = 1),
+ toFanoutPublisher(1)) {
downstreamSubscription.request(1)
upstreamSubscription.expectRequest(1)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala
index 7702b46f73..66e5395fa7 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala
@@ -48,9 +48,10 @@ class FlowSplitAfterSpec extends StreamSpec {
def cancel(): Unit = subscription.cancel()
}
- class SubstreamsSupport(splitAfter: Int = 3,
- elementCount: Int = 6,
- substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) {
+ class SubstreamsSupport(
+ splitAfter: Int = 3,
+ elementCount: Int = 6,
+ substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) {
val source = Source(1 to elementCount)
val groupStream = source.splitAfter(substreamCancelStrategy)(_ == splitAfter).lift.runWith(Sink.asPublisher(false))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala
index 20e06a8551..cc0dddc8b0 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala
@@ -40,9 +40,10 @@ class FlowSplitWhenSpec extends StreamSpec {
def cancel(): Unit = subscription.cancel()
}
- class SubstreamsSupport(splitWhen: Int = 3,
- elementCount: Int = 6,
- substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) {
+ class SubstreamsSupport(
+ splitWhen: Int = 3,
+ elementCount: Int = 6,
+ substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) {
val source = Source(1 to elementCount)
val groupStream = source.splitWhen(substreamCancelStrategy)(_ == splitWhen).lift.runWith(Sink.asPublisher(false))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala
index 771cfb73ec..fc58f1895d 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala
@@ -160,20 +160,22 @@ class FramingSpec extends StreamSpec {
val fieldOffsets = List(0, 1, 2, 3, 15, 16, 31, 32, 44, 107)
def encode(payload: ByteString, fieldOffset: Int, fieldLength: Int, byteOrder: ByteOrder): ByteString = {
- encodeComplexFrame(payload,
- fieldOffset,
- fieldLength,
- byteOrder,
- ByteString(new Array[Byte](fieldOffset)),
- ByteString.empty)
+ encodeComplexFrame(
+ payload,
+ fieldOffset,
+ fieldLength,
+ byteOrder,
+ ByteString(new Array[Byte](fieldOffset)),
+ ByteString.empty)
}
- def encodeComplexFrame(payload: ByteString,
- fieldOffset: Int,
- fieldLength: Int,
- byteOrder: ByteOrder,
- offset: ByteString,
- tail: ByteString): ByteString = {
+ def encodeComplexFrame(
+ payload: ByteString,
+ fieldOffset: Int,
+ fieldLength: Int,
+ byteOrder: ByteOrder,
+ offset: ByteString,
+ tail: ByteString): ByteString = {
val header = {
val h = (new ByteStringBuilder).putInt(payload.size)(byteOrder).result()
byteOrder match {
@@ -228,12 +230,13 @@ class FramingSpec extends StreamSpec {
val payload = referenceChunk.take(length)
val offsetBytes = offset()
val tailBytes = if (offsetBytes.length > 0) new Array[Byte](offsetBytes(0)) else Array.empty[Byte]
- encodeComplexFrame(payload,
- fieldOffset,
- fieldLength,
- byteOrder,
- ByteString(offsetBytes),
- ByteString(tailBytes))
+ encodeComplexFrame(
+ payload,
+ fieldOffset,
+ fieldLength,
+ byteOrder,
+ ByteString(offsetBytes),
+ ByteString(tailBytes))
}
Source(encodedFrames)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala
index 859a51b633..457705575a 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala
@@ -102,28 +102,29 @@ class GraphBroadcastSpec extends StreamSpec {
List(f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22))
val result = RunnableGraph
- .fromGraph(GraphDSL.create(headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink,
- headSink)(combine) {
+ .fromGraph(GraphDSL.create(
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink,
+ headSink)(combine) {
implicit b =>
(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, p22) =>
val bcast = b.add(Broadcast[Int](22))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala
index 702bf10a16..f9812d5cfc 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala
@@ -136,11 +136,12 @@ class GraphMergePrioritizedSpec extends TwoStreamsSetup {
}
}
- private def threeSourceMerge[T](source1: Source[T, NotUsed],
- source2: Source[T, NotUsed],
- source3: Source[T, NotUsed],
- priorities: Seq[Int],
- probe: ManualProbe[T]) = {
+ private def threeSourceMerge[T](
+ source1: Source[T, NotUsed],
+ source2: Source[T, NotUsed],
+ source3: Source[T, NotUsed],
+ priorities: Seq[Int],
+ probe: ManualProbe[T]) = {
RunnableGraph.fromGraph(GraphDSL.create(source1, source2, source3)((_, _, _)) { implicit b => (s1, s2, s3) =>
val merge = b.add(MergePrioritized[T](priorities))
// introduce a delay on the consuming side making it more likely that
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala
index ccbe2a2895..dd82b3c737 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala
@@ -240,28 +240,29 @@ class GraphUnzipWithSpec extends StreamSpec {
RunnableGraph
.fromGraph(GraphDSL.create() { implicit b =>
val split22 = (a: (List[Int])) =>
- (a(0),
- a(0).toString,
- a(1),
- a(1).toString,
- a(2),
- a(2).toString,
- a(3),
- a(3).toString,
- a(4),
- a(4).toString,
- a(5),
- a(5).toString,
- a(6),
- a(6).toString,
- a(7),
- a(7).toString,
- a(8),
- a(8).toString,
- a(9),
- a(9).toString,
- a(10),
- a(10).toString)
+ (
+ a(0),
+ a(0).toString,
+ a(1),
+ a(1).toString,
+ a(2),
+ a(2).toString,
+ a(3),
+ a(3).toString,
+ a(4),
+ a(4).toString,
+ a(5),
+ a(5).toString,
+ a(6),
+ a(6).toString,
+ a(7),
+ a(7).toString,
+ a(8),
+ a(8).toString,
+ a(9),
+ a(9).toString,
+ a(10),
+ a(10).toString)
// odd input ports will be Int, even input ports will be String
val unzip = b.add(UnzipWith(split22))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala
index 67d26552a5..4f3972c239 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala
@@ -171,28 +171,29 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup {
RunnableGraph
.fromGraph(GraphDSL.create() { implicit b =>
- val sum22 = (v1: Int,
- v2: String,
- v3: Int,
- v4: String,
- v5: Int,
- v6: String,
- v7: Int,
- v8: String,
- v9: Int,
- v10: String,
- v11: Int,
- v12: String,
- v13: Int,
- v14: String,
- v15: Int,
- v16: String,
- v17: Int,
- v18: String,
- v19: Int,
- v20: String,
- v21: Int,
- v22: String) =>
+ val sum22 = (
+ v1: Int,
+ v2: String,
+ v3: Int,
+ v4: String,
+ v5: Int,
+ v6: String,
+ v7: Int,
+ v8: String,
+ v9: Int,
+ v10: String,
+ v11: Int,
+ v12: String,
+ v13: Int,
+ v14: String,
+ v15: Int,
+ v16: String,
+ v17: Int,
+ v18: String,
+ v19: Int,
+ v20: String,
+ v21: Int,
+ v22: String) =>
v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 + v10 +
v11 + v12 + v13 + v14 + v15 + v16 + v17 + v18 + v19 + v20 + v21 + v22
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala
index ceb88991d3..8e55511f61 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala
@@ -149,28 +149,29 @@ class GraphZipWithSpec extends TwoStreamsSetup {
RunnableGraph
.fromGraph(GraphDSL.create() { implicit b =>
- val sum22 = (v1: Int,
- v2: String,
- v3: Int,
- v4: String,
- v5: Int,
- v6: String,
- v7: Int,
- v8: String,
- v9: Int,
- v10: String,
- v11: Int,
- v12: String,
- v13: Int,
- v14: String,
- v15: Int,
- v16: String,
- v17: Int,
- v18: String,
- v19: Int,
- v20: String,
- v21: Int,
- v22: String) =>
+ val sum22 = (
+ v1: Int,
+ v2: String,
+ v3: Int,
+ v4: String,
+ v5: Int,
+ v6: String,
+ v7: Int,
+ v8: String,
+ v9: Int,
+ v10: String,
+ v11: Int,
+ v12: String,
+ v13: Int,
+ v14: String,
+ v15: Int,
+ v16: String,
+ v17: Int,
+ v18: String,
+ v19: Int,
+ v20: String,
+ v21: Int,
+ v22: String) =>
v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 + v10 +
v11 + v12 + v13 + v14 + v15 + v16 + v17 + v18 +
v19 + v20 + v21 + v22
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala
index cec00a4287..3e5a228311 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala
@@ -464,9 +464,10 @@ class HubSpec extends StreamSpec {
"be able to use as fastest consumer router" in assertAllStagesStopped {
val source = Source(0 until 1000).runWith(
- PartitionHub.statefulSink(() => (info, elem) => info.consumerIds.toVector.minBy(id => info.queueSize(id)),
- startAfterNrOfConsumers = 2,
- bufferSize = 4))
+ PartitionHub.statefulSink(
+ () => (info, elem) => info.consumerIds.toVector.minBy(id => info.queueSize(id)),
+ startAfterNrOfConsumers = 2,
+ bufferSize = 4))
val result1 = source.runWith(Sink.seq)
val result2 = source.throttle(10, 100.millis, 10, ThrottleMode.Shaping).runWith(Sink.seq)
@@ -662,9 +663,10 @@ class HubSpec extends StreamSpec {
"drop elements with negative index" in assertAllStagesStopped {
val source = Source(0 until 10).runWith(
- PartitionHub.sink((size, elem) => if (elem == 3 || elem == 4) -1 else elem % size,
- startAfterNrOfConsumers = 2,
- bufferSize = 8))
+ PartitionHub.sink(
+ (size, elem) => if (elem == 3 || elem == 4) -1 else elem % size,
+ startAfterNrOfConsumers = 2,
+ bufferSize = 8))
val result1 = source.runWith(Sink.seq)
val result2 = source.runWith(Sink.seq)
result1.futureValue should ===((0 to 8 by 2).filterNot(_ == 4))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala
index 6637f38d30..5b6948d894 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala
@@ -38,9 +38,10 @@ class JsonFramingSpec extends AkkaSpec {
}
// #using-json-framing
- result.futureValue shouldBe Seq("""{ "name" : "john" }""",
- """{ "name" : "Ég get etið gler án þess að meiða mig" }""",
- """{ "name" : "jack" }""")
+ result.futureValue shouldBe Seq(
+ """{ "name" : "john" }""",
+ """{ "name" : "Ég get etið gler án þess að meiða mig" }""",
+ """{ "name" : "jack" }""")
}
"emit single json element from string" in {
@@ -72,9 +73,10 @@ class JsonFramingSpec extends AkkaSpec {
case (acc, entry) => acc ++ Seq(entry.utf8String)
}
- Await.result(result, 3.seconds) shouldBe Seq("""{ "name": "john" }""",
- """{ "name": "jack" }""",
- """{ "name": "katie" }""")
+ Await.result(result, 3.seconds) shouldBe Seq(
+ """{ "name": "john" }""",
+ """{ "name": "jack" }""",
+ """{ "name": "katie" }""")
}
"parse comma delimited" in {
@@ -90,23 +92,25 @@ class JsonFramingSpec extends AkkaSpec {
}
"parse chunks successfully" in {
- val input: Seq[ByteString] = Seq("""
+ val input: Seq[ByteString] = Seq(
+ """
|[
| { "name": "john"""".stripMargin,
- """
+ """
|},
""".stripMargin,
- """{ "na""",
- """me": "jack""",
- """"}]"""").map(ByteString(_))
+ """{ "na""",
+ """me": "jack""",
+ """"}]"""").map(ByteString(_))
val result = Source.apply(input).via(JsonFraming.objectScanner(Int.MaxValue)).runFold(Seq.empty[String]) {
case (acc, entry) => acc ++ Seq(entry.utf8String)
}
- result.futureValue shouldBe Seq("""{ "name": "john"
+ result.futureValue shouldBe Seq(
+ """{ "name": "john"
|}""".stripMargin,
- """{ "name": "jack"}""")
+ """{ "name": "jack"}""")
}
"emit all elements after input completes" in {
@@ -480,9 +484,10 @@ class JsonFramingSpec extends AkkaSpec {
}
"fail when 2nd object is too large" in {
- val input = List("""{ "name": "john" }""",
- """{ "name": "jack" }""",
- """{ "name": "very very long name somehow. how did this happen?" }""").map(s => ByteString(s))
+ val input = List(
+ """{ "name": "john" }""",
+ """{ "name": "jack" }""",
+ """{ "name": "very very long name somehow. how did this happen?" }""").map(s => ByteString(s))
val probe = Source(input).via(JsonFraming.objectScanner(48)).runWith(TestSink.probe)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala
index af016e512e..036b5ef723 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala
@@ -501,10 +501,11 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1
RestartFlow.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts)
}
- def setupFlow(minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- maxRestarts: Int = -1,
- onlyOnFailures: Boolean = false) = {
+ def setupFlow(
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ maxRestarts: Int = -1,
+ onlyOnFailures: Boolean = false) = {
val created = new AtomicInteger()
val (flowInSource: TestPublisher.Probe[String], flowInProbe: TestSubscriber.Probe[String]) =
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala
index d5869cdb8d..a82efb5729 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala
@@ -19,23 +19,25 @@ class ReverseArrowSpec extends StreamSpec {
"Reverse Arrows in the Graph DSL" must {
"work from Inlets" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- s.in <~ source
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ s.in <~ source
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work from SinkShape" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- s <~ source
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ s <~ source
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work from Sink" in {
@@ -74,15 +76,16 @@ class ReverseArrowSpec extends StreamSpec {
}
"work from FlowShape" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- val f: FlowShape[Int, Int] = b.add(Flow[Int])
- f <~ source
- f ~> s
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ val f: FlowShape[Int, Int] = b.add(Flow[Int])
+ f <~ source
+ f ~> s
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work from UniformFanInShape" in {
@@ -114,47 +117,51 @@ class ReverseArrowSpec extends StreamSpec {
}
"work towards Outlets" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- val o: Outlet[Int] = b.add(source).out
- s <~ o
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ val o: Outlet[Int] = b.add(source).out
+ s <~ o
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work towards SourceShape" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- val o: SourceShape[Int] = b.add(source)
- s <~ o
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ val o: SourceShape[Int] = b.add(source)
+ s <~ o
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work towards Source" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- s <~ source
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ s <~ source
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work towards FlowShape" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- val f: FlowShape[Int, Int] = b.add(Flow[Int])
- s <~ f
- source ~> f
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ val f: FlowShape[Int, Int] = b.add(Flow[Int])
+ s <~ f
+ source ~> f
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work towards UniformFanInShape" in {
@@ -218,23 +225,25 @@ class ReverseArrowSpec extends StreamSpec {
}
"work across a Flow" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- s <~ Flow[Int] <~ source
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ s <~ Flow[Int] <~ source
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
"work across a FlowShape" in {
- Await.result(RunnableGraph
- .fromGraph(GraphDSL.create(sink) { implicit b => s =>
- s <~ b.add(Flow[Int]) <~ source
- ClosedShape
- })
- .run(),
- 1.second) should ===(Seq(1, 2, 3))
+ Await.result(
+ RunnableGraph
+ .fromGraph(GraphDSL.create(sink) { implicit b => s =>
+ s <~ b.add(Flow[Int]) <~ source
+ ClosedShape
+ })
+ .run(),
+ 1.second) should ===(Seq(1, 2, 3))
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala
index a05c6c5c06..99f86cb85a 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala
@@ -107,9 +107,9 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures {
"combine to many outputs with simplified API" in {
val probes = Seq.fill(3)(TestSubscriber.manualProbe[Int]())
- val sink = Sink.combine(Sink.fromSubscriber(probes(0)),
- Sink.fromSubscriber(probes(1)),
- Sink.fromSubscriber(probes(2)))(Broadcast[Int](_))
+ val sink =
+ Sink.combine(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)), Sink.fromSubscriber(probes(2)))(
+ Broadcast[Int](_))
Source(List(0, 1, 2)).runWith(sink)
@@ -212,10 +212,11 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures {
"Java collector Sink" must {
- class TestCollector(_supplier: () => Supplier[Array[Int]],
- _accumulator: () => BiConsumer[Array[Int], Int],
- _combiner: () => BinaryOperator[Array[Int]],
- _finisher: () => function.Function[Array[Int], Int])
+ class TestCollector(
+ _supplier: () => Supplier[Array[Int]],
+ _accumulator: () => BiConsumer[Array[Int], Int],
+ _combiner: () => BinaryOperator[Array[Int]],
+ _finisher: () => function.Function[Array[Int], Int])
extends Collector[Int, Array[Int], Int] {
override def supplier(): Supplier[Array[Int]] = _supplier()
override def combiner(): BinaryOperator[Array[Int]] = _combiner()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala
index 751ee3d403..d2c38c8bf3 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala
@@ -213,24 +213,25 @@ object StageActorRefSpec {
}
}
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = {
- sum += grab(in)
- p.trySuccess(sum)
- completeStage()
- }
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush(): Unit = {
+ sum += grab(in)
+ p.trySuccess(sum)
+ completeStage()
+ }
- override def onUpstreamFinish(): Unit = {
- p.trySuccess(sum)
- completeStage()
- }
+ override def onUpstreamFinish(): Unit = {
+ p.trySuccess(sum)
+ completeStage()
+ }
- override def onUpstreamFailure(ex: Throwable): Unit = {
- p.tryFailure(ex)
- failStage(ex)
- }
- })
+ override def onUpstreamFailure(ex: Throwable): Unit = {
+ p.tryFailure(ex)
+ failStage(ex)
+ }
+ })
}
logic -> p.future
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala
index f76465b2bb..297403b6e7 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala
@@ -29,12 +29,13 @@ class TakeLastSinkSpec extends StreamSpec {
//#takeLast-operator-example
case class Student(name: String, gpa: Double)
- val students = List(Student("Alison", 4.7),
- Student("Adrian", 3.1),
- Student("Alexis", 4),
- Student("Benita", 2.1),
- Student("Kendra", 4.2),
- Student("Jerrie", 4.3)).sortBy(_.gpa)
+ val students = List(
+ Student("Alison", 4.7),
+ Student("Adrian", 3.1),
+ Student("Alexis", 4),
+ Student("Benita", 2.1),
+ Student("Kendra", 4.2),
+ Student("Jerrie", 4.3)).sortBy(_.gpa)
val sourceOfStudents = Source(students)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala
index e404411320..2bc460a7fa 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala
@@ -21,11 +21,12 @@ import scala.concurrent.{ Await, ExecutionContext, Future, Promise }
object UnfoldResourceAsyncSourceSpec {
- class ResourceDummy[T](values: Seq[T],
- // these can be used to control when the resource creates, reads first element and completes closing
- createFuture: Future[Done] = Future.successful(Done),
- firstReadFuture: Future[Done] = Future.successful(Done),
- closeFuture: Future[Done] = Future.successful(Done))(implicit ec: ExecutionContext) {
+ class ResourceDummy[T](
+ values: Seq[T],
+ // these can be used to control when the resource creates, reads first element and completes closing
+ createFuture: Future[Done] = Future.successful(Done),
+ firstReadFuture: Future[Done] = Future.successful(Done),
+ closeFuture: Future[Done] = Future.successful(Done))(implicit ec: ExecutionContext) {
private val iterator = values.iterator
private val createdP = Promise[Done]()
private val closedP = Promise[Done]()
@@ -137,9 +138,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"fail when close throws exception" in assertAllStagesStopped {
val probe = TestSubscriber.probe[Unit]()
Source
- .unfoldResourceAsync[Unit, Unit](() => Future.successful(()),
- _ => Future.successful[Option[Unit]](None),
- _ => throw TE(""))
+ .unfoldResourceAsync[Unit, Unit](
+ () => Future.successful(()),
+ _ => Future.successful[Option[Unit]](None),
+ _ => throw TE(""))
.runWith(Sink.fromSubscriber(probe))
probe.ensureSubscription()
probe.request(1L)
@@ -149,9 +151,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"fail when close returns failed future" in assertAllStagesStopped {
val probe = TestSubscriber.probe[Unit]()
Source
- .unfoldResourceAsync[Unit, Unit](() => Future.successful(()),
- _ => Future.successful[Option[Unit]](None),
- _ => Future.failed(throw TE("")))
+ .unfoldResourceAsync[Unit, Unit](
+ () => Future.successful(()),
+ _ => Future.successful[Option[Unit]](None),
+ _ => Future.failed(throw TE("")))
.runWith(Sink.fromSubscriber(probe))
probe.ensureSubscription()
probe.request(1L)
@@ -160,15 +163,16 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"continue when Strategy is Resume and read throws" in assertAllStagesStopped {
val result = Source
- .unfoldResourceAsync[Int, Iterator[Any]](() => Future.successful(List(1, 2, TE("read-error"), 3).iterator),
- iterator =>
- if (iterator.hasNext) {
- iterator.next() match {
- case n: Int => Future.successful(Some(n))
- case e: TE => throw e
- }
- } else Future.successful(None),
- _ => Future.successful(Done))
+ .unfoldResourceAsync[Int, Iterator[Any]](
+ () => Future.successful(List(1, 2, TE("read-error"), 3).iterator),
+ iterator =>
+ if (iterator.hasNext) {
+ iterator.next() match {
+ case n: Int => Future.successful(Some(n))
+ case e: TE => throw e
+ }
+ } else Future.successful(None),
+ _ => Future.successful(Done))
.withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
.runWith(Sink.seq)
@@ -177,15 +181,16 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"continue when Strategy is Resume and read returns failed future" in assertAllStagesStopped {
val result = Source
- .unfoldResourceAsync[Int, Iterator[Any]](() => Future.successful(List(1, 2, TE("read-error"), 3).iterator),
- iterator =>
- if (iterator.hasNext) {
- iterator.next() match {
- case n: Int => Future.successful(Some(n))
- case e: TE => Future.failed(e)
- }
- } else Future.successful(None),
- _ => Future.successful(Done))
+ .unfoldResourceAsync[Int, Iterator[Any]](
+ () => Future.successful(List(1, 2, TE("read-error"), 3).iterator),
+ iterator =>
+ if (iterator.hasNext) {
+ iterator.next() match {
+ case n: Int => Future.successful(Some(n))
+ case e: TE => Future.failed(e)
+ }
+ } else Future.successful(None),
+ _ => Future.successful(Done))
.withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
.runWith(Sink.seq)
@@ -245,9 +250,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"fail stream when restarting and close throws" in assertAllStagesStopped {
val out = TestSubscriber.probe[Int]()
Source
- .unfoldResourceAsync[Int, Iterator[Int]](() => Future.successful(List(1, 2, 3).iterator),
- reader => throw TE("read-error"),
- _ => throw new TE("close-error"))
+ .unfoldResourceAsync[Int, Iterator[Int]](
+ () => Future.successful(List(1, 2, 3).iterator),
+ reader => throw TE("read-error"),
+ _ => throw new TE("close-error"))
.withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider))
.runWith(Sink.fromSubscriber(out))
@@ -258,9 +264,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"fail stream when restarting and close returns failed future" in assertAllStagesStopped {
val out = TestSubscriber.probe[Int]()
Source
- .unfoldResourceAsync[Int, Iterator[Int]](() => Future.successful(List(1, 2, 3).iterator),
- reader => throw TE("read-error"),
- _ => Future.failed(new TE("close-error")))
+ .unfoldResourceAsync[Int, Iterator[Int]](
+ () => Future.successful(List(1, 2, 3).iterator),
+ reader => throw TE("read-error"),
+ _ => Future.failed(new TE("close-error")))
.withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider))
.runWith(Sink.fromSubscriber(out))
@@ -307,9 +314,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
val materializer = ActorMaterializer()(sys)
try {
val p = Source
- .unfoldResourceAsync[String, Unit](() => Promise[Unit].future, // never complete
- _ => ???,
- _ => ???)
+ .unfoldResourceAsync[String, Unit](
+ () => Promise[Unit].future, // never complete
+ _ => ???,
+ _ => ???)
.runWith(Sink.ignore)(materializer)
materializer
@@ -326,16 +334,15 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
val closeLatch = TestLatch(1)
val mat = ActorMaterializer()
val p = Source
- .unfoldResourceAsync[String, Unit](() => Future.successful(()),
- // a slow trickle of elements that never ends
- _ =>
- akka.pattern.after(100.millis, system.scheduler)(
- Future.successful(Some("element"))),
- _ =>
- Future.successful {
- closeLatch.countDown()
- Done
- })
+ .unfoldResourceAsync[String, Unit](
+ () => Future.successful(()),
+ // a slow trickle of elements that never ends
+ _ => akka.pattern.after(100.millis, system.scheduler)(Future.successful(Some("element"))),
+ _ =>
+ Future.successful {
+ closeLatch.countDown()
+ Done
+ })
.runWith(Sink.asPublisher(false))(mat)
val c = TestSubscriber.manualProbe[String]()
p.subscribe(c)
@@ -350,11 +357,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
val closePromise = Promise[Done]()
Source
.unfoldResourceAsync[String, Unit](
- // delay it a bit to give cancellation time to come upstream
- () =>
- akka.pattern.after(100.millis, system.scheduler)(Future.successful(())),
- _ => Future.successful(Some("whatever")),
- _ => closePromise.success(Done).future)
+ // delay it a bit to give cancellation time to come upstream
+ () => akka.pattern.after(100.millis, system.scheduler)(Future.successful(())),
+ _ => Future.successful(Some("whatever")),
+ _ => closePromise.success(Done).future)
.runWith(Sink.cancelled)
closePromise.future.futureValue should ===(Done)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala
index cdc27be869..37feb3211a 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala
@@ -51,9 +51,10 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"Unfold Resource Source" must {
"read contents from a file" in assertAllStagesStopped {
val p = Source
- .unfoldResource[String, BufferedReader](() => newBufferedReader(),
- reader => Option(reader.readLine()),
- reader => reader.close())
+ .unfoldResource[String, BufferedReader](
+ () => newBufferedReader(),
+ reader => Option(reader.readLine()),
+ reader => reader.close())
.runWith(Sink.asPublisher(false))
val c = TestSubscriber.manualProbe[String]()
p.subscribe(c)
@@ -151,9 +152,10 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
val materializer = ActorMaterializer()(sys)
try {
val p = Source
- .unfoldResource[String, BufferedReader](() => newBufferedReader(),
- reader => Option(reader.readLine()),
- reader => reader.close())
+ .unfoldResource[String, BufferedReader](
+ () => newBufferedReader(),
+ reader => Option(reader.readLine()),
+ reader => reader.close())
.runWith(TestSink.probe)(materializer)
materializer
@@ -169,9 +171,10 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"fail when create throws exception" in assertAllStagesStopped {
EventFilter[TE](occurrences = 1).intercept {
val p = Source
- .unfoldResource[String, BufferedReader](() => throw TE(""),
- reader => Option(reader.readLine()),
- reader => reader.close())
+ .unfoldResource[String, BufferedReader](
+ () => throw TE(""),
+ reader => Option(reader.readLine()),
+ reader => reader.close())
.runWith(Sink.asPublisher(false))
val c = TestSubscriber.manualProbe[String]()
p.subscribe(c)
@@ -186,9 +189,10 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
EventFilter[TE](occurrences = 1).intercept {
Source
- .unfoldResource[String, Iterator[String]](() => Iterator("a"),
- it => if (it.hasNext) Some(it.next()) else None,
- _ => throw TE(""))
+ .unfoldResource[String, Iterator[String]](
+ () => Iterator("a"),
+ it => if (it.hasNext) Some(it.next()) else None,
+ _ => throw TE(""))
.runWith(Sink.fromSubscriber(out))
out.request(61)
@@ -201,9 +205,10 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"not close the resource twice when read fails" in {
val closedCounter = new AtomicInteger(0)
val probe = Source
- .unfoldResource[Int, Int](() => 23, // the best resource there is
- _ => throw TE("failing read"),
- _ => closedCounter.incrementAndGet())
+ .unfoldResource[Int, Int](
+ () => 23, // the best resource there is
+ _ => throw TE("failing read"),
+ _ => closedCounter.incrementAndGet())
.runWith(TestSink.probe[Int])
probe.request(1)
@@ -215,11 +220,12 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) {
"not close the resource twice when read fails and then close fails" in {
val closedCounter = new AtomicInteger(0)
val probe = Source
- .unfoldResource[Int, Int](() => 23, // the best resource there is
- _ => throw TE("failing read"), { _ =>
- closedCounter.incrementAndGet()
- if (closedCounter.get == 1) throw TE("boom")
- })
+ .unfoldResource[Int, Int](
+ () => 23, // the best resource there is
+ _ => throw TE("failing read"), { _ =>
+ closedCounter.incrementAndGet()
+ if (closedCounter.get == 1) throw TE("boom")
+ })
.runWith(TestSink.probe[Int])
EventFilter[TE](occurrences = 1).intercept {
diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala
index e8b9eba3fd..3e09b6ebbb 100644
--- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala
+++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala
@@ -55,9 +55,10 @@ object ActorFlow {
* @tparam Q Question message type that is spoken by the target actor
* @tparam A Answer type that the Actor is expected to reply with, it will become the Output type of this Flow
*/
- def ask[I, Q, A](ref: ActorRef[Q],
- timeout: java.time.Duration,
- makeMessage: BiFunction[I, ActorRef[A], Q]): Flow[I, A, NotUsed] =
+ def ask[I, Q, A](
+ ref: ActorRef[Q],
+ timeout: java.time.Duration,
+ makeMessage: BiFunction[I, ActorRef[A], Q]): Flow[I, A, NotUsed] =
akka.stream.typed.scaladsl.ActorFlow
.ask[I, Q, A](parallelism = 2)(ref)((i, ref) => makeMessage(i, ref))(
JavaDurationConverters.asFiniteDuration(timeout))
@@ -96,10 +97,11 @@ object ActorFlow {
* @tparam Q Question message type that is spoken by the target actor
* @tparam A Answer type that the Actor is expected to reply with, it will become the Output type of this Flow
*/
- def ask[I, Q, A](parallelism: Int,
- ref: ActorRef[Q],
- timeout: java.time.Duration,
- makeMessage: (I, ActorRef[A]) => Q): Flow[I, A, NotUsed] =
+ def ask[I, Q, A](
+ parallelism: Int,
+ ref: ActorRef[Q],
+ timeout: java.time.Duration,
+ makeMessage: (I, ActorRef[A]) => Q): Flow[I, A, NotUsed] =
akka.stream.typed.scaladsl.ActorFlow
.ask[I, Q, A](parallelism)(ref)((i, ref) => makeMessage(i, ref))(timeout.toMillis.millis)
.asJava
diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala
index 23efacdcf8..bdc3b3f94c 100644
--- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala
+++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala
@@ -41,9 +41,10 @@ object ActorMaterializerFactory {
* the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of
* `namePrefix-flowNumber-flowStepNumber-stepName`.
*/
- def create[T](settings: ActorMaterializerSettings,
- namePrefix: String,
- actorSystem: ActorSystem[T]): akka.stream.ActorMaterializer =
+ def create[T](
+ settings: ActorMaterializerSettings,
+ namePrefix: String,
+ actorSystem: ActorSystem[T]): akka.stream.ActorMaterializer =
akka.stream.ActorMaterializer.create(settings, actorSystem.toUntyped, namePrefix)
/**
@@ -74,8 +75,9 @@ object ActorMaterializerFactory {
* the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of
* `namePrefix-flowNumber-flowStepNumber-stepName`.
*/
- def create[T](settings: ActorMaterializerSettings,
- namePrefix: String,
- ctx: ActorContext[T]): akka.stream.ActorMaterializer =
+ def create[T](
+ settings: ActorMaterializerSettings,
+ namePrefix: String,
+ ctx: ActorContext[T]): akka.stream.ActorMaterializer =
akka.stream.ActorMaterializer.create(settings, Adapter.toUntyped(ctx), namePrefix)
}
diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala
index cc45595c51..a39ceedac3 100644
--- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala
+++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala
@@ -30,9 +30,10 @@ object ActorSink {
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
* limiting operator in front of this `Sink`.
*/
- def actorRef[T](ref: ActorRef[T],
- onCompleteMessage: T,
- onFailureMessage: akka.japi.function.Function[Throwable, T]): Sink[T, NotUsed] =
+ def actorRef[T](
+ ref: ActorRef[T],
+ onCompleteMessage: T,
+ onFailureMessage: akka.japi.function.Function[Throwable, T]): Sink[T, NotUsed] =
typed.scaladsl.ActorSink.actorRef(ref, onCompleteMessage, onFailureMessage.apply).asJava
/**
@@ -48,19 +49,21 @@ object ActorSink {
* When the stream is completed with failure - result of `onFailureMessage(throwable)`
* function will be sent to the destination actor.
*/
- def actorRefWithAck[T, M, A](ref: ActorRef[M],
- messageAdapter: akka.japi.function.Function2[ActorRef[A], T, M],
- onInitMessage: akka.japi.function.Function[ActorRef[A], M],
- ackMessage: A,
- onCompleteMessage: M,
- onFailureMessage: akka.japi.function.Function[Throwable, M]): Sink[T, NotUsed] =
+ def actorRefWithAck[T, M, A](
+ ref: ActorRef[M],
+ messageAdapter: akka.japi.function.Function2[ActorRef[A], T, M],
+ onInitMessage: akka.japi.function.Function[ActorRef[A], M],
+ ackMessage: A,
+ onCompleteMessage: M,
+ onFailureMessage: akka.japi.function.Function[Throwable, M]): Sink[T, NotUsed] =
typed.scaladsl.ActorSink
- .actorRefWithAck(ref,
- messageAdapter.apply,
- onInitMessage.apply,
- ackMessage,
- onCompleteMessage,
- onFailureMessage.apply)
+ .actorRefWithAck(
+ ref,
+ messageAdapter.apply,
+ onInitMessage.apply,
+ ackMessage,
+ onCompleteMessage,
+ onFailureMessage.apply)
.asJava
}
diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala
index a03f58db92..cc16238ef2 100644
--- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala
+++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala
@@ -47,15 +47,17 @@ object ActorSource {
* @param bufferSize The size of the buffer in element count
* @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer
*/
- def actorRef[T](completionMatcher: Predicate[T],
- failureMatcher: PartialFunction[T, Throwable],
- bufferSize: Int,
- overflowStrategy: OverflowStrategy): Source[T, ActorRef[T]] = {
+ def actorRef[T](
+ completionMatcher: Predicate[T],
+ failureMatcher: PartialFunction[T, Throwable],
+ bufferSize: Int,
+ overflowStrategy: OverflowStrategy): Source[T, ActorRef[T]] = {
akka.stream.typed.scaladsl.ActorSource
- .actorRef({ case m if completionMatcher.test(m) => }: PartialFunction[T, Unit],
- failureMatcher,
- bufferSize,
- overflowStrategy)
+ .actorRef(
+ { case m if completionMatcher.test(m) => }: PartialFunction[T, Unit],
+ failureMatcher,
+ bufferSize,
+ overflowStrategy)
.asJava
}
}
diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala
index c05508f87d..1d42d19b49 100644
--- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala
+++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala
@@ -39,9 +39,10 @@ object ActorMaterializer {
* the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of
* `namePrefix-flowNumber-flowStepNumber-stepName`.
*/
- def boundToActor[T](ctx: ActorContext[T],
- materializerSettings: Option[ActorMaterializerSettings] = None,
- namePrefix: Option[String] = None): ActorMaterializer =
+ def boundToActor[T](
+ ctx: ActorContext[T],
+ materializerSettings: Option[ActorMaterializerSettings] = None,
+ namePrefix: Option[String] = None): ActorMaterializer =
akka.stream.ActorMaterializer(materializerSettings, namePrefix)(ctx.toUntyped)
}
diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala
index 98c12b36d5..3384e9e5e1 100644
--- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala
+++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala
@@ -46,17 +46,19 @@ object ActorSink {
* When the stream is completed with failure - result of `onFailureMessage(throwable)`
* function will be sent to the destination actor.
*/
- def actorRefWithAck[T, M, A](ref: ActorRef[M],
- messageAdapter: (ActorRef[A], T) => M,
- onInitMessage: ActorRef[A] => M,
- ackMessage: A,
- onCompleteMessage: M,
- onFailureMessage: Throwable => M): Sink[T, NotUsed] =
- Sink.actorRefWithAck(ref.toUntyped,
- messageAdapter.curried.compose(actorRefAdapter),
- onInitMessage.compose(actorRefAdapter),
- ackMessage,
- onCompleteMessage,
- onFailureMessage)
+ def actorRefWithAck[T, M, A](
+ ref: ActorRef[M],
+ messageAdapter: (ActorRef[A], T) => M,
+ onInitMessage: ActorRef[A] => M,
+ ackMessage: A,
+ onCompleteMessage: M,
+ onFailureMessage: Throwable => M): Sink[T, NotUsed] =
+ Sink.actorRefWithAck(
+ ref.toUntyped,
+ messageAdapter.curried.compose(actorRefAdapter),
+ onInitMessage.compose(actorRefAdapter),
+ ackMessage,
+ onCompleteMessage,
+ onFailureMessage)
}
diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala
index a2a0948dc4..cafc12886f 100644
--- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala
+++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala
@@ -47,14 +47,16 @@ object ActorSource {
* @param bufferSize The size of the buffer in element count
* @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer
*/
- def actorRef[T](completionMatcher: PartialFunction[T, Unit],
- failureMatcher: PartialFunction[T, Throwable],
- bufferSize: Int,
- overflowStrategy: OverflowStrategy): Source[T, ActorRef[T]] =
+ def actorRef[T](
+ completionMatcher: PartialFunction[T, Unit],
+ failureMatcher: PartialFunction[T, Throwable],
+ bufferSize: Int,
+ overflowStrategy: OverflowStrategy): Source[T, ActorRef[T]] =
Source
- .actorRef[T](completionMatcher.asInstanceOf[PartialFunction[Any, Unit]],
- failureMatcher.asInstanceOf[PartialFunction[Any, Throwable]],
- bufferSize,
- overflowStrategy)
+ .actorRef[T](
+ completionMatcher.asInstanceOf[PartialFunction[Any, Unit]],
+ failureMatcher.asInstanceOf[PartialFunction[Any, Throwable]],
+ bufferSize,
+ overflowStrategy)
.mapMaterializedValue(actorRefAdapter)
}
diff --git a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala
index b957bb557b..93102152c6 100644
--- a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala
+++ b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala
@@ -81,12 +81,13 @@ object ActorSourceSinkExample {
val actor: ActorRef[Protocol] = ???
- val sink: Sink[String, NotUsed] = ActorSink.actorRefWithAck(ref = actor,
- onCompleteMessage = Complete,
- onFailureMessage = Fail.apply,
- messageAdapter = Message.apply,
- onInitMessage = Init.apply,
- ackMessage = Ack)
+ val sink: Sink[String, NotUsed] = ActorSink.actorRefWithAck(
+ ref = actor,
+ onCompleteMessage = Complete,
+ onFailureMessage = Fail.apply,
+ messageAdapter = Message.apply,
+ onInitMessage = Init.apply,
+ ackMessage = Ack)
Source.single("msg1").runWith(sink)
// #actor-sink-ref-with-ack
diff --git a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala
index 00107477a5..b7ff5382fd 100644
--- a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala
+++ b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala
@@ -60,17 +60,19 @@ object ActorMaterializer {
val haveShutDown = new AtomicBoolean(false)
val system = actorSystemOf(context)
- new PhasedFusingActorMaterializer(system,
- materializerSettings,
- system.dispatchers,
- actorOfStreamSupervisor(materializerSettings, context, haveShutDown),
- haveShutDown,
- FlowNames(system).name.copy(namePrefix))
+ new PhasedFusingActorMaterializer(
+ system,
+ materializerSettings,
+ system.dispatchers,
+ actorOfStreamSupervisor(materializerSettings, context, haveShutDown),
+ haveShutDown,
+ FlowNames(system).name.copy(namePrefix))
}
- private def actorOfStreamSupervisor(materializerSettings: ActorMaterializerSettings,
- context: ActorRefFactory,
- haveShutDown: AtomicBoolean) = {
+ private def actorOfStreamSupervisor(
+ materializerSettings: ActorMaterializerSettings,
+ context: ActorRefFactory,
+ haveShutDown: AtomicBoolean) = {
val props = StreamSupervisor.props(materializerSettings, haveShutDown)
context match {
case s: ExtendedActorSystem => s.systemActorOf(props, StreamSupervisor.nextName())
@@ -96,9 +98,10 @@ object ActorMaterializer {
/**
* INTERNAL API: Creates the `StreamSupervisor` as a system actor.
*/
- private[akka] def systemMaterializer(materializerSettings: ActorMaterializerSettings,
- namePrefix: String,
- system: ExtendedActorSystem): ActorMaterializer = {
+ private[akka] def systemMaterializer(
+ materializerSettings: ActorMaterializerSettings,
+ namePrefix: String,
+ system: ExtendedActorSystem): ActorMaterializer = {
val haveShutDown = new AtomicBoolean(false)
new PhasedFusingActorMaterializer(
system,
@@ -255,32 +258,34 @@ object ActorMaterializerSettings {
@deprecated(
"Create the settings using the apply(system) or apply(config) method, and then modify them using the .with methods.",
since = "2.5.10")
- def apply(initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
- subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int) = {
+ def apply(
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
+ subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int) = {
// these sins were committed in the name of bin comp:
val config = ConfigFactory.defaultReference
- new ActorMaterializerSettings(initialInputBufferSize,
- maxInputBufferSize,
- dispatcher,
- supervisionDecider,
- subscriptionTimeoutSettings,
- debugLogging,
- outputBurstLimit,
- fuzzingMode,
- autoFusing,
- maxFixedBufferSize,
- 1000,
- IOSettings(tcpWriteBufferSize = 16 * 1024),
- StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")),
- config.getString(ActorAttributes.IODispatcher.dispatcher))
+ new ActorMaterializerSettings(
+ initialInputBufferSize,
+ maxInputBufferSize,
+ dispatcher,
+ supervisionDecider,
+ subscriptionTimeoutSettings,
+ debugLogging,
+ outputBurstLimit,
+ fuzzingMode,
+ autoFusing,
+ maxFixedBufferSize,
+ 1000,
+ IOSettings(tcpWriteBufferSize = 16 * 1024),
+ StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")),
+ config.getString(ActorAttributes.IODispatcher.dispatcher))
}
/**
@@ -293,20 +298,21 @@ object ActorMaterializerSettings {
* Create [[ActorMaterializerSettings]] from a Config subsection (Scala).
*/
def apply(config: Config): ActorMaterializerSettings =
- new ActorMaterializerSettings(initialInputBufferSize = config.getInt("initial-input-buffer-size"),
- maxInputBufferSize = config.getInt("max-input-buffer-size"),
- dispatcher = config.getString("dispatcher"),
- supervisionDecider = Supervision.stoppingDecider,
- subscriptionTimeoutSettings = StreamSubscriptionTimeoutSettings(config),
- debugLogging = config.getBoolean("debug-logging"),
- outputBurstLimit = config.getInt("output-burst-limit"),
- fuzzingMode = config.getBoolean("debug.fuzzing-mode"),
- autoFusing = config.getBoolean("auto-fusing"),
- maxFixedBufferSize = config.getInt("max-fixed-buffer-size"),
- syncProcessingLimit = config.getInt("sync-processing-limit"),
- ioSettings = IOSettings(config.getConfig("io")),
- streamRefSettings = StreamRefSettings(config.getConfig("stream-ref")),
- blockingIoDispatcher = config.getString("blocking-io-dispatcher"))
+ new ActorMaterializerSettings(
+ initialInputBufferSize = config.getInt("initial-input-buffer-size"),
+ maxInputBufferSize = config.getInt("max-input-buffer-size"),
+ dispatcher = config.getString("dispatcher"),
+ supervisionDecider = Supervision.stoppingDecider,
+ subscriptionTimeoutSettings = StreamSubscriptionTimeoutSettings(config),
+ debugLogging = config.getBoolean("debug-logging"),
+ outputBurstLimit = config.getInt("output-burst-limit"),
+ fuzzingMode = config.getBoolean("debug.fuzzing-mode"),
+ autoFusing = config.getBoolean("auto-fusing"),
+ maxFixedBufferSize = config.getInt("max-fixed-buffer-size"),
+ syncProcessingLimit = config.getInt("sync-processing-limit"),
+ ioSettings = IOSettings(config.getConfig("io")),
+ streamRefSettings = StreamRefSettings(config.getConfig("stream-ref")),
+ blockingIoDispatcher = config.getString("blocking-io-dispatcher"))
/**
* Create [[ActorMaterializerSettings]] from individual settings (Java).
@@ -315,32 +321,34 @@ object ActorMaterializerSettings {
@deprecated(
"Create the settings using the create(system) or create(config) method, and then modify them using the .with methods.",
since = "2.5.10")
- def create(initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
- subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int) = {
+ def create(
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
+ subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int) = {
// these sins were committed in the name of bin comp:
val config = ConfigFactory.defaultReference
- new ActorMaterializerSettings(initialInputBufferSize,
- maxInputBufferSize,
- dispatcher,
- supervisionDecider,
- subscriptionTimeoutSettings,
- debugLogging,
- outputBurstLimit,
- fuzzingMode,
- autoFusing,
- maxFixedBufferSize,
- 1000,
- IOSettings(tcpWriteBufferSize = 16 * 1024),
- StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")),
- config.getString(ActorAttributes.IODispatcher.dispatcher))
+ new ActorMaterializerSettings(
+ initialInputBufferSize,
+ maxInputBufferSize,
+ dispatcher,
+ supervisionDecider,
+ subscriptionTimeoutSettings,
+ debugLogging,
+ outputBurstLimit,
+ fuzzingMode,
+ autoFusing,
+ maxFixedBufferSize,
+ 1000,
+ IOSettings(tcpWriteBufferSize = 16 * 1024),
+ StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")),
+ config.getString(ActorAttributes.IODispatcher.dispatcher))
}
/**
@@ -389,124 +397,133 @@ final class ActorMaterializerSettings @InternalApi private (
require(syncProcessingLimit > 0, "syncProcessingLimit must be > 0")
requirePowerOfTwo(maxInputBufferSize, "maxInputBufferSize")
- require(initialInputBufferSize <= maxInputBufferSize,
- s"initialInputBufferSize($initialInputBufferSize) must be <= maxInputBufferSize($maxInputBufferSize)")
+ require(
+ initialInputBufferSize <= maxInputBufferSize,
+ s"initialInputBufferSize($initialInputBufferSize) must be <= maxInputBufferSize($maxInputBufferSize)")
// backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima
@deprecated("Use ActorMaterializerSettings.apply or ActorMaterializerSettings.create instead", "2.5.10")
- def this(initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
- subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int,
- syncProcessingLimit: Int,
- ioSettings: IOSettings) =
+ def this(
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
+ subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int,
+ syncProcessingLimit: Int,
+ ioSettings: IOSettings) =
// using config like this is not quite right but the only way to solve backwards comp without hard coding settings
- this(initialInputBufferSize,
- maxInputBufferSize,
- dispatcher,
- supervisionDecider,
- subscriptionTimeoutSettings,
- debugLogging,
- outputBurstLimit,
- fuzzingMode,
- autoFusing,
- maxFixedBufferSize,
- syncProcessingLimit,
- ioSettings,
- StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")),
- ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher))
+ this(
+ initialInputBufferSize,
+ maxInputBufferSize,
+ dispatcher,
+ supervisionDecider,
+ subscriptionTimeoutSettings,
+ debugLogging,
+ outputBurstLimit,
+ fuzzingMode,
+ autoFusing,
+ maxFixedBufferSize,
+ syncProcessingLimit,
+ ioSettings,
+ StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")),
+ ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher))
// backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima
@deprecated("Use ActorMaterializerSettings.apply or ActorMaterializerSettings.create instead", "2.5.10")
- def this(initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
- subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int,
- syncProcessingLimit: Int) =
+ def this(
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
+ subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int,
+ syncProcessingLimit: Int) =
// using config like this is not quite right but the only way to solve backwards comp without hard coding settings
- this(initialInputBufferSize,
- maxInputBufferSize,
- dispatcher,
- supervisionDecider,
- subscriptionTimeoutSettings,
- debugLogging,
- outputBurstLimit,
- fuzzingMode,
- autoFusing,
- maxFixedBufferSize,
- syncProcessingLimit,
- IOSettings(tcpWriteBufferSize = 16 * 1024),
- StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")),
- ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher))
+ this(
+ initialInputBufferSize,
+ maxInputBufferSize,
+ dispatcher,
+ supervisionDecider,
+ subscriptionTimeoutSettings,
+ debugLogging,
+ outputBurstLimit,
+ fuzzingMode,
+ autoFusing,
+ maxFixedBufferSize,
+ syncProcessingLimit,
+ IOSettings(tcpWriteBufferSize = 16 * 1024),
+ StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")),
+ ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher))
// backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima
@deprecated("Use ActorMaterializerSettings.apply or ActorMaterializerSettings.create instead", "2.5.10")
- def this(initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
- subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int) =
+ def this(
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
+ subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int) =
// using config like this is not quite right but the only way to solve backwards comp without hard coding settings
- this(initialInputBufferSize,
- maxInputBufferSize,
- dispatcher,
- supervisionDecider,
- subscriptionTimeoutSettings,
- debugLogging,
- outputBurstLimit,
- fuzzingMode,
- autoFusing,
- maxFixedBufferSize,
- 1000,
- IOSettings(tcpWriteBufferSize = 16 * 1024),
- StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")),
- ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher))
+ this(
+ initialInputBufferSize,
+ maxInputBufferSize,
+ dispatcher,
+ supervisionDecider,
+ subscriptionTimeoutSettings,
+ debugLogging,
+ outputBurstLimit,
+ fuzzingMode,
+ autoFusing,
+ maxFixedBufferSize,
+ 1000,
+ IOSettings(tcpWriteBufferSize = 16 * 1024),
+ StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")),
+ ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher))
- private def copy(initialInputBufferSize: Int = this.initialInputBufferSize,
- maxInputBufferSize: Int = this.maxInputBufferSize,
- dispatcher: String = this.dispatcher,
- supervisionDecider: Supervision.Decider = this.supervisionDecider,
- subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings = this.subscriptionTimeoutSettings,
- debugLogging: Boolean = this.debugLogging,
- outputBurstLimit: Int = this.outputBurstLimit,
- fuzzingMode: Boolean = this.fuzzingMode,
- autoFusing: Boolean = this.autoFusing,
- maxFixedBufferSize: Int = this.maxFixedBufferSize,
- syncProcessingLimit: Int = this.syncProcessingLimit,
- ioSettings: IOSettings = this.ioSettings,
- streamRefSettings: StreamRefSettings = this.streamRefSettings,
- blockingIoDispatcher: String = this.blockingIoDispatcher) = {
- new ActorMaterializerSettings(initialInputBufferSize,
- maxInputBufferSize,
- dispatcher,
- supervisionDecider,
- subscriptionTimeoutSettings,
- debugLogging,
- outputBurstLimit,
- fuzzingMode,
- autoFusing,
- maxFixedBufferSize,
- syncProcessingLimit,
- ioSettings,
- streamRefSettings,
- blockingIoDispatcher)
+ private def copy(
+ initialInputBufferSize: Int = this.initialInputBufferSize,
+ maxInputBufferSize: Int = this.maxInputBufferSize,
+ dispatcher: String = this.dispatcher,
+ supervisionDecider: Supervision.Decider = this.supervisionDecider,
+ subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings = this.subscriptionTimeoutSettings,
+ debugLogging: Boolean = this.debugLogging,
+ outputBurstLimit: Int = this.outputBurstLimit,
+ fuzzingMode: Boolean = this.fuzzingMode,
+ autoFusing: Boolean = this.autoFusing,
+ maxFixedBufferSize: Int = this.maxFixedBufferSize,
+ syncProcessingLimit: Int = this.syncProcessingLimit,
+ ioSettings: IOSettings = this.ioSettings,
+ streamRefSettings: StreamRefSettings = this.streamRefSettings,
+ blockingIoDispatcher: String = this.blockingIoDispatcher) = {
+ new ActorMaterializerSettings(
+ initialInputBufferSize,
+ maxInputBufferSize,
+ dispatcher,
+ supervisionDecider,
+ subscriptionTimeoutSettings,
+ debugLogging,
+ outputBurstLimit,
+ fuzzingMode,
+ autoFusing,
+ maxFixedBufferSize,
+ syncProcessingLimit,
+ ioSettings,
+ streamRefSettings,
+ blockingIoDispatcher)
}
/**
@@ -705,15 +722,17 @@ object StreamSubscriptionTimeoutSettings {
/**
* Create settings from individual values (Java).
*/
- def create(mode: StreamSubscriptionTimeoutTerminationMode,
- timeout: FiniteDuration): StreamSubscriptionTimeoutSettings =
+ def create(
+ mode: StreamSubscriptionTimeoutTerminationMode,
+ timeout: FiniteDuration): StreamSubscriptionTimeoutSettings =
new StreamSubscriptionTimeoutSettings(mode, timeout)
/**
* Create settings from individual values (Scala).
*/
- def apply(mode: StreamSubscriptionTimeoutTerminationMode,
- timeout: FiniteDuration): StreamSubscriptionTimeoutSettings =
+ def apply(
+ mode: StreamSubscriptionTimeoutTerminationMode,
+ timeout: FiniteDuration): StreamSubscriptionTimeoutSettings =
new StreamSubscriptionTimeoutSettings(mode, timeout)
/**
@@ -739,8 +758,9 @@ object StreamSubscriptionTimeoutSettings {
* Leaked publishers and subscribers are cleaned up when they are not used within a given
* deadline, configured by [[StreamSubscriptionTimeoutSettings]].
*/
-final class StreamSubscriptionTimeoutSettings(val mode: StreamSubscriptionTimeoutTerminationMode,
- val timeout: FiniteDuration) {
+final class StreamSubscriptionTimeoutSettings(
+ val mode: StreamSubscriptionTimeoutTerminationMode,
+ val timeout: FiniteDuration) {
override def equals(other: Any): Boolean = other match {
case s: StreamSubscriptionTimeoutSettings => s.mode == mode && s.timeout == timeout
case _ => false
diff --git a/akka-stream/src/main/scala/akka/stream/Attributes.scala b/akka-stream/src/main/scala/akka/stream/Attributes.scala
index 5a362f8e4c..b500ba5996 100644
--- a/akka-stream/src/main/scala/akka/stream/Attributes.scala
+++ b/akka-stream/src/main/scala/akka/stream/Attributes.scala
@@ -360,9 +360,10 @@ object Attributes {
* Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]].
*
*/
- def createLogLevels(onElement: Logging.LogLevel,
- onFinish: Logging.LogLevel,
- onFailure: Logging.LogLevel): Attributes =
+ def createLogLevels(
+ onElement: Logging.LogLevel,
+ onFinish: Logging.LogLevel,
+ onFailure: Logging.LogLevel): Attributes =
logLevels(onElement, onFinish, onFailure)
/**
@@ -381,9 +382,10 @@ object Attributes {
*
* See [[Attributes.createLogLevels]] for Java API
*/
- def logLevels(onElement: Logging.LogLevel = Logging.DebugLevel,
- onFinish: Logging.LogLevel = Logging.DebugLevel,
- onFailure: Logging.LogLevel = Logging.ErrorLevel) =
+ def logLevels(
+ onElement: Logging.LogLevel = Logging.DebugLevel,
+ onFinish: Logging.LogLevel = Logging.DebugLevel,
+ onFailure: Logging.LogLevel = Logging.ErrorLevel) =
Attributes(LogLevels(onElement, onFinish, onFailure))
/**
@@ -462,9 +464,10 @@ object ActorAttributes {
* Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]].
*
*/
- def createLogLevels(onElement: Logging.LogLevel,
- onFinish: Logging.LogLevel,
- onFailure: Logging.LogLevel): Attributes =
+ def createLogLevels(
+ onElement: Logging.LogLevel,
+ onFinish: Logging.LogLevel,
+ onFailure: Logging.LogLevel): Attributes =
logLevels(onElement, onFinish, onFailure)
/**
@@ -483,9 +486,10 @@ object ActorAttributes {
*
* See [[Attributes.createLogLevels]] for Java API
*/
- def logLevels(onElement: Logging.LogLevel = Logging.DebugLevel,
- onFinish: Logging.LogLevel = Logging.DebugLevel,
- onFailure: Logging.LogLevel = Logging.ErrorLevel) =
+ def logLevels(
+ onElement: Logging.LogLevel = Logging.DebugLevel,
+ onFinish: Logging.LogLevel = Logging.DebugLevel,
+ onFailure: Logging.LogLevel = Logging.ErrorLevel) =
Attributes(LogLevels(onElement, onFinish, onFailure))
}
diff --git a/akka-stream/src/main/scala/akka/stream/FanInShape.scala b/akka-stream/src/main/scala/akka/stream/FanInShape.scala
index 9d9a0c2fb0..5588dc689e 100644
--- a/akka-stream/src/main/scala/akka/stream/FanInShape.scala
+++ b/akka-stream/src/main/scala/akka/stream/FanInShape.scala
@@ -23,9 +23,10 @@ object FanInShape {
}
}
-abstract class FanInShape[+O] private (_out: Outlet[O @uncheckedVariance],
- _registered: Iterator[Inlet[_]],
- _name: String)
+abstract class FanInShape[+O] private (
+ _out: Outlet[O @uncheckedVariance],
+ _registered: Iterator[Inlet[_]],
+ _name: String)
extends Shape {
import FanInShape._
diff --git a/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala b/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala
index 910f8032c5..a49fa75d0b 100644
--- a/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala
+++ b/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala
@@ -19,9 +19,10 @@ class FanInShape1N[-T0, -T1, +O](val n: Int, _init: FanInShape.Init[O]) extends
def this(n: Int) = this(n, FanInShape.Name[O]("FanInShape1N"))
def this(n: Int, name: String) = this(n, FanInShape.Name[O](name))
- def this(outlet: Outlet[O @uncheckedVariance],
- in0: Inlet[T0 @uncheckedVariance],
- inlets1: Array[Inlet[T1 @uncheckedVariance]]) =
+ def this(
+ outlet: Outlet[O @uncheckedVariance],
+ in0: Inlet[T0 @uncheckedVariance],
+ inlets1: Array[Inlet[T1 @uncheckedVariance]]) =
this(inlets1.length, FanInShape.Ports(outlet, in0 :: inlets1.toList))
override protected def construct(init: FanInShape.Init[O @uncheckedVariance]): FanInShape[O] =
new FanInShape1N(n, init)
diff --git a/akka-stream/src/main/scala/akka/stream/FanOutShape.scala b/akka-stream/src/main/scala/akka/stream/FanOutShape.scala
index 17546f1755..f9c327f7c3 100644
--- a/akka-stream/src/main/scala/akka/stream/FanOutShape.scala
+++ b/akka-stream/src/main/scala/akka/stream/FanOutShape.scala
@@ -23,9 +23,10 @@ object FanOutShape {
}
}
-abstract class FanOutShape[-I] private (_in: Inlet[I @uncheckedVariance],
- _registered: Iterator[Outlet[_]],
- _name: String)
+abstract class FanOutShape[-I] private (
+ _in: Inlet[I @uncheckedVariance],
+ _registered: Iterator[Outlet[_]],
+ _name: String)
extends Shape {
import FanOutShape._
diff --git a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala
index 781ca9a49e..df8ae76b75 100644
--- a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala
+++ b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala
@@ -96,10 +96,11 @@ object KillSwitches {
extends GraphStageWithMaterializedValue[BidiShape[Any, Any, Any, Any], UniqueKillSwitch] {
override val initialAttributes = Attributes.name("breaker")
- override val shape = BidiShape(Inlet[Any]("KillSwitchBidi.in1"),
- Outlet[Any]("KillSwitchBidi.out1"),
- Inlet[Any]("KillSwitchBidi.in2"),
- Outlet[Any]("KillSwitchBidi.out2"))
+ override val shape = BidiShape(
+ Inlet[Any]("KillSwitchBidi.in1"),
+ Outlet[Any]("KillSwitchBidi.out1"),
+ Inlet[Any]("KillSwitchBidi.in2"),
+ Outlet[Any]("KillSwitchBidi.out2"))
override def toString: String = "UniqueKillSwitchBidi"
override def createLogicAndMaterializedValue(attr: Attributes) = {
diff --git a/akka-stream/src/main/scala/akka/stream/Materializer.scala b/akka-stream/src/main/scala/akka/stream/Materializer.scala
index 36bc628ba1..2b607fe132 100644
--- a/akka-stream/src/main/scala/akka/stream/Materializer.scala
+++ b/akka-stream/src/main/scala/akka/stream/Materializer.scala
@@ -46,8 +46,9 @@ abstract class Materializer {
* The result can be highly implementation specific, ranging from local actor chains to remote-deployed
* processing networks.
*/
- def materialize[Mat](runnable: Graph[ClosedShape, Mat],
- @deprecatedName('initialAttributes) defaultAttributes: Attributes): Mat
+ def materialize[Mat](
+ runnable: Graph[ClosedShape, Mat],
+ @deprecatedName('initialAttributes) defaultAttributes: Attributes): Mat
/**
* Running a flow graph will require execution resources, as will computations
@@ -107,6 +108,7 @@ private[akka] object NoMaterializer extends Materializer {
* INTERNAL API
*/
@InternalApi
-private[akka] case class MaterializationContext(materializer: Materializer,
- effectiveAttributes: Attributes,
- islandName: String)
+private[akka] case class MaterializationContext(
+ materializer: Materializer,
+ effectiveAttributes: Attributes,
+ islandName: String)
diff --git a/akka-stream/src/main/scala/akka/stream/Shape.scala b/akka-stream/src/main/scala/akka/stream/Shape.scala
index 3f5692633c..d9aa4e9343 100644
--- a/akka-stream/src/main/scala/akka/stream/Shape.scala
+++ b/akka-stream/src/main/scala/akka/stream/Shape.scala
@@ -339,10 +339,11 @@ object SinkShape {
* +------+
* }}}
*/
-final case class BidiShape[-In1, +Out1, -In2, +Out2](in1: Inlet[In1 @uncheckedVariance],
- out1: Outlet[Out1 @uncheckedVariance],
- in2: Inlet[In2 @uncheckedVariance],
- out2: Outlet[Out2 @uncheckedVariance])
+final case class BidiShape[-In1, +Out1, -In2, +Out2](
+ in1: Inlet[In1 @uncheckedVariance],
+ out1: Outlet[Out1 @uncheckedVariance],
+ in2: Inlet[In2 @uncheckedVariance],
+ out2: Outlet[Out2 @uncheckedVariance])
extends Shape {
//#implementation-details-elided
override val inlets: immutable.Seq[Inlet[_]] = in1 :: in2 :: Nil
@@ -364,10 +365,11 @@ object BidiShape {
BidiShape(top.in, top.out, bottom.in, bottom.out)
/** Java API */
- def of[In1, Out1, In2, Out2](in1: Inlet[In1 @uncheckedVariance],
- out1: Outlet[Out1 @uncheckedVariance],
- in2: Inlet[In2 @uncheckedVariance],
- out2: Outlet[Out2 @uncheckedVariance]): BidiShape[In1, Out1, In2, Out2] =
+ def of[In1, Out1, In2, Out2](
+ in1: Inlet[In1 @uncheckedVariance],
+ out1: Outlet[Out1 @uncheckedVariance],
+ in2: Inlet[In2 @uncheckedVariance],
+ out2: Outlet[Out2 @uncheckedVariance]): BidiShape[In1, Out1, In2, Out2] =
BidiShape(in1, out1, in2, out2)
}
diff --git a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala
index 97e70d414a..9d5929c193 100644
--- a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala
+++ b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala
@@ -197,10 +197,11 @@ object TLSProtocol {
* on client authentication requirements while `clientAuth = Some(ClientAuth.None)`
* switches off client authentication.
*/
- case class NegotiateNewSession(enabledCipherSuites: Option[immutable.Seq[String]],
- enabledProtocols: Option[immutable.Seq[String]],
- clientAuth: Option[TLSClientAuth],
- sslParameters: Option[SSLParameters])
+ case class NegotiateNewSession(
+ enabledCipherSuites: Option[immutable.Seq[String]],
+ enabledProtocols: Option[immutable.Seq[String]],
+ clientAuth: Option[TLSClientAuth],
+ sslParameters: Option[SSLParameters])
extends SslTlsOutbound {
/**
diff --git a/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala b/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala
index ed39c95ff9..2eb2ca3d7e 100644
--- a/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala
+++ b/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala
@@ -28,12 +28,11 @@ object StreamRefSettings {
/** Scala API */
def apply(c: Config): StreamRefSettings = {
- StreamRefSettingsImpl(bufferCapacity = c.getInt("buffer-capacity"),
- demandRedeliveryInterval =
- c.getDuration("demand-redelivery-interval", TimeUnit.MILLISECONDS).millis,
- subscriptionTimeout = c.getDuration("subscription-timeout", TimeUnit.MILLISECONDS).millis,
- finalTerminationSignalDeadline =
- c.getDuration("final-termination-signal-deadline", TimeUnit.MILLISECONDS).millis)
+ StreamRefSettingsImpl(
+ bufferCapacity = c.getInt("buffer-capacity"),
+ demandRedeliveryInterval = c.getDuration("demand-redelivery-interval", TimeUnit.MILLISECONDS).millis,
+ subscriptionTimeout = c.getDuration("subscription-timeout", TimeUnit.MILLISECONDS).millis,
+ finalTerminationSignalDeadline = c.getDuration("final-termination-signal-deadline", TimeUnit.MILLISECONDS).millis)
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala b/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala
index 1fce009841..aaf97513e4 100644
--- a/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala
+++ b/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala
@@ -23,8 +23,9 @@ object Implicits {
/**
* Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`.
*/
- def timed[O, Mat2](measuredOps: Source[I, Mat] => Source[O, Mat2],
- onComplete: FiniteDuration => Unit): Source[O, Mat2] =
+ def timed[O, Mat2](
+ measuredOps: Source[I, Mat] => Source[O, Mat2],
+ onComplete: FiniteDuration => Unit): Source[O, Mat2] =
Timed.timed[I, O, Mat, Mat2](source, measuredOps, onComplete)
/**
@@ -44,8 +45,9 @@ object Implicits {
/**
* Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`.
*/
- def timed[Out, Mat2](measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2],
- onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] =
+ def timed[Out, Mat2](
+ measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2],
+ onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] =
Timed.timed[I, O, Out, Mat, Mat2](flow, measuredOps, onComplete)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/extra/Timed.scala b/akka-stream/src/main/scala/akka/stream/extra/Timed.scala
index fd3e317a07..1f860eb254 100644
--- a/akka-stream/src/main/scala/akka/stream/extra/Timed.scala
+++ b/akka-stream/src/main/scala/akka/stream/extra/Timed.scala
@@ -26,9 +26,10 @@ private[akka] trait TimedOps {
* Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`.
*/
@deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5")
- def timed[I, O, Mat, Mat2](source: Source[I, Mat],
- measuredOps: Source[I, Mat] => Source[O, Mat2],
- onComplete: FiniteDuration => Unit): Source[O, Mat2] = {
+ def timed[I, O, Mat, Mat2](
+ source: Source[I, Mat],
+ measuredOps: Source[I, Mat] => Source[O, Mat2],
+ onComplete: FiniteDuration => Unit): Source[O, Mat2] = {
val ctx = new TimedFlowContext
val startTimed = Flow[I].via(new StartTimed(ctx)).named("startTimed")
@@ -43,9 +44,10 @@ private[akka] trait TimedOps {
* Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`.
*/
@deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5")
- def timed[I, O, Out, Mat, Mat2](flow: Flow[I, O, Mat],
- measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2],
- onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] = {
+ def timed[I, O, Out, Mat, Mat2](
+ flow: Flow[I, O, Mat],
+ measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2],
+ onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] = {
// todo is there any other way to provide this for Flow, without duplicating impl?
// they do share a super-type (FlowOps), but all operations of FlowOps return path dependant type
val ctx = new TimedFlowContext
@@ -71,9 +73,10 @@ private[akka] trait TimedIntervalBetweenOps {
* Measures rolling interval between immediately subsequent `matching(o: O)` elements.
*/
@deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5")
- def timedIntervalBetween[O, Mat](source: Source[O, Mat],
- matching: O => Boolean,
- onInterval: FiniteDuration => Unit): Source[O, Mat] = {
+ def timedIntervalBetween[O, Mat](
+ source: Source[O, Mat],
+ matching: O => Boolean,
+ onInterval: FiniteDuration => Unit): Source[O, Mat] = {
val timedInterval = Flow[O].via(new TimedInterval[O](matching, onInterval)).named("timedInterval")
source.via(timedInterval)
}
@@ -82,9 +85,10 @@ private[akka] trait TimedIntervalBetweenOps {
* Measures rolling interval between immediately subsequent `matching(o: O)` elements.
*/
@deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5")
- def timedIntervalBetween[I, O, Mat](flow: Flow[I, O, Mat],
- matching: O => Boolean,
- onInterval: FiniteDuration => Unit): Flow[I, O, Mat] = {
+ def timedIntervalBetween[I, O, Mat](
+ flow: Flow[I, O, Mat],
+ matching: O => Boolean,
+ onInterval: FiniteDuration => Unit): Flow[I, O, Mat] = {
val timedInterval = Flow[O].via(new TimedInterval[O](matching, onInterval)).named("timedInterval")
flow.via(timedInterval)
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
index 3e5140c379..82c88b5cb0 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
@@ -32,10 +32,11 @@ import scala.concurrent.{ Await, ExecutionContextExecutor }
@InternalApi def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat], defaultAttributes: Attributes): Mat
/** INTERNAL API */
- @InternalApi private[akka] def materialize[Mat](graph: Graph[ClosedShape, Mat],
- defaultAttributes: Attributes,
- defaultPhase: Phase[Any],
- phases: Map[IslandTag, Phase[Any]]): Mat
+ @InternalApi private[akka] def materialize[Mat](
+ graph: Graph[ClosedShape, Mat],
+ defaultAttributes: Attributes,
+ defaultPhase: Phase[Any],
+ phases: Map[IslandTag, Phase[Any]]): Mat
/**
* INTERNAL API
@@ -91,14 +92,16 @@ import scala.concurrent.{ Await, ExecutionContextExecutor }
*
* The default phases are left in-tact since we still respect `.async` and other tags that were marked within a sub-fused graph.
*/
-private[akka] class SubFusingActorMaterializerImpl(val delegate: ExtendedActorMaterializer,
- registerShell: GraphInterpreterShell => ActorRef)
+private[akka] class SubFusingActorMaterializerImpl(
+ val delegate: ExtendedActorMaterializer,
+ registerShell: GraphInterpreterShell => ActorRef)
extends Materializer {
val subFusingPhase = new Phase[Any] {
- override def apply(settings: ActorMaterializerSettings,
- attributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String): PhaseIsland[Any] = {
+ override def apply(
+ settings: ActorMaterializerSettings,
+ attributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String): PhaseIsland[Any] = {
new GraphStageIsland(settings, attributes, materializer, islandName, OptionVal(registerShell))
.asInstanceOf[PhaseIsland[Any]]
}
@@ -126,9 +129,10 @@ private[akka] class SubFusingActorMaterializerImpl(val delegate: ExtendedActorMa
override def scheduleOnce(delay: FiniteDuration, task: Runnable): Cancellable = delegate.scheduleOnce(delay, task)
- override def schedulePeriodically(initialDelay: FiniteDuration,
- interval: FiniteDuration,
- task: Runnable): Cancellable =
+ override def schedulePeriodically(
+ initialDelay: FiniteDuration,
+ interval: FiniteDuration,
+ task: Runnable): Cancellable =
delegate.schedulePeriodically(initialDelay, interval, task)
override def withNamePrefix(name: String): SubFusingActorMaterializerImpl =
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala
index d6bc304b69..0a66ea2b02 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala
@@ -252,8 +252,9 @@ import akka.event.Logging
/**
* INTERNAL API
*/
-@InternalApi private[akka] abstract class ActorProcessorImpl(attributes: Attributes,
- val settings: ActorMaterializerSettings)
+@InternalApi private[akka] abstract class ActorProcessorImpl(
+ attributes: Attributes,
+ val settings: ActorMaterializerSettings)
extends Actor
with ActorLogging
with Pump {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala
index 5455659197..56b21f1b71 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala
@@ -100,8 +100,9 @@ import org.reactivestreams.Subscription
/**
* INTERNAL API
*/
-@InternalApi private[akka] class ActorSubscription[T](final val impl: ActorRef,
- final val subscriber: Subscriber[_ >: T])
+@InternalApi private[akka] class ActorSubscription[T](
+ final val impl: ActorRef,
+ final val subscriber: Subscriber[_ >: T])
extends Subscription {
override def request(elements: Long): Unit = impl ! RequestMore(this, elements)
override def cancel(): Unit = impl ! Cancel(this)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala
index bebaf24878..837188255b 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala
@@ -16,12 +16,13 @@ import akka.stream.stage._
/**
* INTERNAL API
*/
-@InternalApi private[akka] class ActorRefBackpressureSinkStage[In](ref: ActorRef,
- messageAdapter: ActorRef => In => Any,
- onInitMessage: ActorRef => Any,
- ackMessage: Any,
- onCompleteMessage: Any,
- onFailureMessage: (Throwable) => Any)
+@InternalApi private[akka] class ActorRefBackpressureSinkStage[In](
+ ref: ActorRef,
+ messageAdapter: ActorRef => In => Any,
+ onInitMessage: ActorRef => Any,
+ ackMessage: Any,
+ onCompleteMessage: Any,
+ onFailureMessage: (Throwable) => Any)
extends GraphStage[SinkShape[In]] {
val in: Inlet[In] = Inlet[In]("ActorRefBackpressureSink.in")
override def initialAttributes = DefaultAttributes.actorRefWithAck
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala
index 26e4da3589..bdf23aa849 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala
@@ -23,10 +23,11 @@ import akka.annotation.InternalApi
/**
* INTERNAL API
*/
-@InternalApi private[akka] class ActorRefSinkActor(ref: ActorRef,
- highWatermark: Int,
- onCompleteMessage: Any,
- onFailureMessage: Throwable => Any)
+@InternalApi private[akka] class ActorRefSinkActor(
+ ref: ActorRef,
+ highWatermark: Int,
+ onCompleteMessage: Any,
+ onFailureMessage: Throwable => Any)
extends ActorSubscriber {
import ActorSubscriberMessage._
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala
index 573bbfc1a7..1b9afc9def 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala
@@ -16,11 +16,12 @@ import akka.stream.ActorMaterializerSettings
* INTERNAL API
*/
@InternalApi private[akka] object ActorRefSourceActor {
- def props(completionMatcher: PartialFunction[Any, Unit],
- failureMatcher: PartialFunction[Any, Throwable],
- bufferSize: Int,
- overflowStrategy: OverflowStrategy,
- settings: ActorMaterializerSettings) = {
+ def props(
+ completionMatcher: PartialFunction[Any, Unit],
+ failureMatcher: PartialFunction[Any, Throwable],
+ bufferSize: Int,
+ overflowStrategy: OverflowStrategy,
+ settings: ActorMaterializerSettings) = {
require(overflowStrategy != OverflowStrategies.Backpressure, "Backpressure overflowStrategy not supported")
val maxFixedBufferSize = settings.maxFixedBufferSize
Props(new ActorRefSourceActor(completionMatcher, failureMatcher, bufferSize, overflowStrategy, maxFixedBufferSize))
@@ -30,11 +31,12 @@ import akka.stream.ActorMaterializerSettings
/**
* INTERNAL API
*/
-@InternalApi private[akka] class ActorRefSourceActor(completionMatcher: PartialFunction[Any, Unit],
- failureMatcher: PartialFunction[Any, Throwable],
- bufferSize: Int,
- overflowStrategy: OverflowStrategy,
- maxFixedBufferSize: Int)
+@InternalApi private[akka] class ActorRefSourceActor(
+ completionMatcher: PartialFunction[Any, Unit],
+ failureMatcher: PartialFunction[Any, Throwable],
+ bufferSize: Int,
+ overflowStrategy: OverflowStrategy,
+ maxFixedBufferSize: Int)
extends akka.stream.actor.ActorPublisher[Any]
with ActorLogging {
import akka.stream.actor.ActorPublisherMessage._
@@ -84,8 +86,9 @@ import akka.stream.ActorMaterializerSettings
buffer.dropTail()
buffer.enqueue(elem)
case s: DropBuffer =>
- log.log(s.logLevel,
- "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]")
+ log.log(
+ s.logLevel,
+ "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]")
buffer.clear()
buffer.enqueue(elem)
case s: DropNew =>
@@ -116,10 +119,11 @@ import akka.stream.ActorMaterializerSettings
if (buffer.isEmpty) onCompleteThenStop() // will complete the stream successfully
case elem if isActive =>
- log.debug("Dropping element because Status.Success received already, " +
- "only draining already buffered elements: [{}] (pending: [{}])",
- elem,
- buffer.used)
+ log.debug(
+ "Dropping element because Status.Success received already, " +
+ "only draining already buffered elements: [{}] (pending: [{}])",
+ elem,
+ buffer.used)
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala b/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala
index 086b6a084a..1b47297843 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala
@@ -311,21 +311,22 @@ import org.reactivestreams.Subscription
extends FanOut(_settings, outputCount = 2) {
outputBunch.markAllOutputs()
- initialPhase(1,
- TransferPhase(primaryInputs.NeedsInput && outputBunch.AllOfMarkedOutputs) { () =>
- primaryInputs.dequeueInputElement() match {
- case (a, b) =>
- outputBunch.enqueue(0, a)
- outputBunch.enqueue(1, b)
+ initialPhase(
+ 1,
+ TransferPhase(primaryInputs.NeedsInput && outputBunch.AllOfMarkedOutputs) { () =>
+ primaryInputs.dequeueInputElement() match {
+ case (a, b) =>
+ outputBunch.enqueue(0, a)
+ outputBunch.enqueue(1, b)
- case t: akka.japi.Pair[_, _] =>
- outputBunch.enqueue(0, t.first)
- outputBunch.enqueue(1, t.second)
+ case t: akka.japi.Pair[_, _] =>
+ outputBunch.enqueue(0, t.first)
+ outputBunch.enqueue(1, t.second)
- case t =>
- throw new IllegalArgumentException(
- s"Unable to unzip elements of type ${t.getClass.getName}, " +
- s"can only handle Tuple2 and akka.japi.Pair!")
- }
- })
+ case t =>
+ throw new IllegalArgumentException(
+ s"Unable to unzip elements of type ${t.getClass.getName}, " +
+ s"can only handle Tuple2 and akka.japi.Pair!")
+ }
+ })
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala
index 958fa99f68..b40e243fbf 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala
@@ -12,10 +12,11 @@ import org.reactivestreams.Subscriber
/**
* INTERNAL API
*/
-@DoNotInherit private[akka] abstract class FanoutOutputs(val maxBufferSize: Int,
- val initialBufferSize: Int,
- self: ActorRef,
- val pump: Pump)
+@DoNotInherit private[akka] abstract class FanoutOutputs(
+ val maxBufferSize: Int,
+ val initialBufferSize: Int,
+ self: ActorRef,
+ val pump: Pump)
extends DefaultOutputTransferStates
with SubscriberManagement[Any] {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala
index 833f85f502..b9fa762b15 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala
@@ -71,9 +71,10 @@ import akka.event.Logging
* that mediate the flow of elements downstream and the propagation of
* back-pressure upstream.
*/
-@InternalApi private[akka] final class PublisherSource[Out](p: Publisher[Out],
- val attributes: Attributes,
- shape: SourceShape[Out])
+@InternalApi private[akka] final class PublisherSource[Out](
+ p: Publisher[Out],
+ val attributes: Attributes,
+ shape: SourceShape[Out])
extends SourceModule[Out, NotUsed](shape) {
override protected def label: String = s"PublisherSource($p)"
@@ -91,9 +92,10 @@ import akka.event.Logging
* Creates and wraps an actor into [[org.reactivestreams.Publisher]] from the given `props`,
* which should be [[akka.actor.Props]] for an [[akka.stream.actor.ActorPublisher]].
*/
-@InternalApi private[akka] final class ActorPublisherSource[Out](props: Props,
- val attributes: Attributes,
- shape: SourceShape[Out])
+@InternalApi private[akka] final class ActorPublisherSource[Out](
+ props: Props,
+ val attributes: Attributes,
+ shape: SourceShape[Out])
extends SourceModule[Out, ActorRef](shape) {
override def create(context: MaterializationContext) = {
@@ -110,12 +112,13 @@ import akka.event.Logging
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class ActorRefSource[Out](completionMatcher: PartialFunction[Any, Unit],
- failureMatcher: PartialFunction[Any, Throwable],
- bufferSize: Int,
- overflowStrategy: OverflowStrategy,
- val attributes: Attributes,
- shape: SourceShape[Out])
+@InternalApi private[akka] final class ActorRefSource[Out](
+ completionMatcher: PartialFunction[Any, Unit],
+ failureMatcher: PartialFunction[Any, Throwable],
+ bufferSize: Int,
+ overflowStrategy: OverflowStrategy,
+ val attributes: Attributes,
+ shape: SourceShape[Out])
extends SourceModule[Out, ActorRef](shape) {
override protected def label: String = s"ActorRefSource($bufferSize, $overflowStrategy)"
diff --git a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala
index 3dfae62268..2862684467 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala
@@ -44,41 +44,46 @@ import akka.util.OptionVal
val Debug = false
val DefaultPhase: Phase[Any] = new Phase[Any] {
- override def apply(settings: ActorMaterializerSettings,
- effectiveAttributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String): PhaseIsland[Any] =
+ override def apply(
+ settings: ActorMaterializerSettings,
+ effectiveAttributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String): PhaseIsland[Any] =
new GraphStageIsland(settings, effectiveAttributes, materializer, islandName, subflowFuser = OptionVal.None)
.asInstanceOf[PhaseIsland[Any]]
}
val DefaultPhases: Map[IslandTag, Phase[Any]] = Map[IslandTag, Phase[Any]](
SinkModuleIslandTag -> new Phase[Any] {
- override def apply(settings: ActorMaterializerSettings,
- effectiveAttributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String): PhaseIsland[Any] =
+ override def apply(
+ settings: ActorMaterializerSettings,
+ effectiveAttributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String): PhaseIsland[Any] =
new SinkModulePhase(materializer, islandName).asInstanceOf[PhaseIsland[Any]]
},
SourceModuleIslandTag -> new Phase[Any] {
- override def apply(settings: ActorMaterializerSettings,
- effectiveAttributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String): PhaseIsland[Any] =
+ override def apply(
+ settings: ActorMaterializerSettings,
+ effectiveAttributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String): PhaseIsland[Any] =
new SourceModulePhase(materializer, islandName).asInstanceOf[PhaseIsland[Any]]
},
ProcessorModuleIslandTag -> new Phase[Any] {
- override def apply(settings: ActorMaterializerSettings,
- effectiveAttributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String): PhaseIsland[Any] =
+ override def apply(
+ settings: ActorMaterializerSettings,
+ effectiveAttributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String): PhaseIsland[Any] =
new ProcessorModulePhase().asInstanceOf[PhaseIsland[Any]]
},
TlsModuleIslandTag -> new Phase[Any] {
- def apply(settings: ActorMaterializerSettings,
- effectiveAttributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String): PhaseIsland[Any] =
+ def apply(
+ settings: ActorMaterializerSettings,
+ effectiveAttributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String): PhaseIsland[Any] =
new TlsModulePhase(materializer, islandName).asInstanceOf[PhaseIsland[Any]]
},
GraphStageTag -> DefaultPhase)
@@ -91,12 +96,13 @@ import akka.util.OptionVal
val streamSupervisor =
context.actorOf(StreamSupervisor.props(materializerSettings, haveShutDown), StreamSupervisor.nextName())
- PhasedFusingActorMaterializer(system,
- materializerSettings,
- system.dispatchers,
- streamSupervisor,
- haveShutDown,
- FlowNames(system).name.copy("flow"))
+ PhasedFusingActorMaterializer(
+ system,
+ materializerSettings,
+ system.dispatchers,
+ streamSupervisor,
+ haveShutDown,
+ FlowNames(system).name.copy("flow"))
}
private def actorSystemOf(context: ActorRefFactory): ActorSystem = {
@@ -113,11 +119,12 @@ import akka.util.OptionVal
}
-private final case class SegmentInfo(globalislandOffset: Int, // The island to which the segment belongs
- length: Int, // How many slots are contained by the segment
- globalBaseOffset: Int, // The global slot where this segment starts
- relativeBaseOffset: Int, // the local offset of the slot where this segment starts
- phase: PhaseIsland[Any]) {
+private final case class SegmentInfo(
+ globalislandOffset: Int, // The island to which the segment belongs
+ length: Int, // How many slots are contained by the segment
+ globalBaseOffset: Int, // The global slot where this segment starts
+ relativeBaseOffset: Int, // the local offset of the slot where this segment starts
+ phase: PhaseIsland[Any]) {
override def toString: String =
s"""
@@ -130,27 +137,30 @@ private final case class SegmentInfo(globalislandOffset: Int, // The island to w
""".stripMargin
}
-private final case class ForwardWire(islandGlobalOffset: Int,
- from: OutPort,
- toGlobalOffset: Int,
- outStage: Any,
- phase: PhaseIsland[Any]) {
+private final case class ForwardWire(
+ islandGlobalOffset: Int,
+ from: OutPort,
+ toGlobalOffset: Int,
+ outStage: Any,
+ phase: PhaseIsland[Any]) {
override def toString: String =
s"ForwardWire(islandId = $islandGlobalOffset, from = $from, toGlobal = $toGlobalOffset, phase = $phase)"
}
-private final case class SavedIslandData(islandGlobalOffset: Int,
- lastVisitedOffset: Int,
- skippedSlots: Int,
- phase: PhaseIsland[Any])
+private final case class SavedIslandData(
+ islandGlobalOffset: Int,
+ lastVisitedOffset: Int,
+ skippedSlots: Int,
+ phase: PhaseIsland[Any])
-@InternalApi private[akka] class IslandTracking(val phases: Map[IslandTag, Phase[Any]],
- val settings: ActorMaterializerSettings,
- attributes: Attributes,
- defaultPhase: Phase[Any],
- val materializer: PhasedFusingActorMaterializer,
- islandNamePrefix: String) {
+@InternalApi private[akka] class IslandTracking(
+ val phases: Map[IslandTag, Phase[Any]],
+ val settings: ActorMaterializerSettings,
+ attributes: Attributes,
+ defaultPhase: Phase[Any],
+ val materializer: PhasedFusingActorMaterializer,
+ islandNamePrefix: String) {
import PhasedFusingActorMaterializer.Debug
@@ -188,11 +198,12 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
if (length > 0) {
// We just finished a segment by entering an island.
- val previousSegment = SegmentInfo(globalislandOffset = currentIslandGlobalOffset,
- length = currentGlobalOffset - currentSegmentGlobalOffset,
- globalBaseOffset = currentSegmentGlobalOffset,
- relativeBaseOffset = currentSegmentGlobalOffset - currentIslandGlobalOffset - currentIslandSkippedSlots,
- currentPhase)
+ val previousSegment = SegmentInfo(
+ globalislandOffset = currentIslandGlobalOffset,
+ length = currentGlobalOffset - currentSegmentGlobalOffset,
+ globalBaseOffset = currentSegmentGlobalOffset,
+ relativeBaseOffset = currentSegmentGlobalOffset - currentIslandGlobalOffset - currentIslandSkippedSlots,
+ currentPhase)
// Segment tracking is by demand, we only allocate this list if it is used.
// If there are no islands, then there is no need to track segments
@@ -336,11 +347,12 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
forwardWires = new java.util.ArrayList[ForwardWire](8)
}
- val forwardWire = ForwardWire(islandGlobalOffset = currentIslandGlobalOffset,
- from = out,
- toGlobalOffset = absoluteOffset,
- logic,
- currentPhase)
+ val forwardWire = ForwardWire(
+ islandGlobalOffset = currentIslandGlobalOffset,
+ from = out,
+ toGlobalOffset = absoluteOffset,
+ logic,
+ currentPhase)
if (Debug) println(s" wiring is forward, recording $forwardWire")
forwardWires.add(forwardWire)
@@ -363,12 +375,13 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
/**
* INTERNAL API
*/
-@InternalApi private[akka] case class PhasedFusingActorMaterializer(system: ActorSystem,
- override val settings: ActorMaterializerSettings,
- dispatchers: Dispatchers,
- supervisor: ActorRef,
- haveShutDown: AtomicBoolean,
- flowNames: SeqActorName)
+@InternalApi private[akka] case class PhasedFusingActorMaterializer(
+ system: ActorSystem,
+ override val settings: ActorMaterializerSettings,
+ dispatchers: Dispatchers,
+ supervisor: ActorRef,
+ haveShutDown: AtomicBoolean,
+ flowNames: SeqActorName)
extends ExtendedActorMaterializer {
import PhasedFusingActorMaterializer._
@@ -417,9 +430,10 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
case other => other
})
- override def schedulePeriodically(initialDelay: FiniteDuration,
- interval: FiniteDuration,
- task: Runnable): Cancellable =
+ override def schedulePeriodically(
+ initialDelay: FiniteDuration,
+ interval: FiniteDuration,
+ task: Runnable): Cancellable =
system.scheduler.schedule(initialDelay, interval, task)(executionContext)
override def scheduleOnce(delay: FiniteDuration, task: Runnable): Cancellable =
@@ -429,22 +443,25 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
materialize(_runnableGraph, defaultAttributes)
override def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat], defaultAttributes: Attributes): Mat =
- materialize(_runnableGraph,
- defaultAttributes,
- PhasedFusingActorMaterializer.DefaultPhase,
- PhasedFusingActorMaterializer.DefaultPhases)
+ materialize(
+ _runnableGraph,
+ defaultAttributes,
+ PhasedFusingActorMaterializer.DefaultPhase,
+ PhasedFusingActorMaterializer.DefaultPhases)
- override def materialize[Mat](graph: Graph[ClosedShape, Mat],
- defaultAttributes: Attributes,
- defaultPhase: Phase[Any],
- phases: Map[IslandTag, Phase[Any]]): Mat = {
+ override def materialize[Mat](
+ graph: Graph[ClosedShape, Mat],
+ defaultAttributes: Attributes,
+ defaultPhase: Phase[Any],
+ phases: Map[IslandTag, Phase[Any]]): Mat = {
if (isShutdown) throw new IllegalStateException("Trying to materialize stream after materializer has been shutdown")
- val islandTracking = new IslandTracking(phases,
- settings,
- defaultAttributes,
- defaultPhase,
- this,
- islandNamePrefix = createFlowName() + "-")
+ val islandTracking = new IslandTracking(
+ phases,
+ settings,
+ defaultAttributes,
+ defaultPhase,
+ this,
+ islandNamePrefix = createFlowName() + "-")
var current: Traversal = graph.traversalBuilder.traversal
@@ -535,9 +552,10 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
}
- private def wireInlets(islandTracking: IslandTracking,
- mod: StreamLayout.AtomicModule[Shape, Any],
- logic: Any): Unit = {
+ private def wireInlets(
+ islandTracking: IslandTracking,
+ mod: StreamLayout.AtomicModule[Shape, Any],
+ logic: Any): Unit = {
val inlets = mod.shape.inlets
if (inlets.nonEmpty) {
if (Shape.hasOnePort(inlets)) {
@@ -553,11 +571,12 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
}
}
- private def wireOutlets(islandTracking: IslandTracking,
- mod: StreamLayout.AtomicModule[Shape, Any],
- logic: Any,
- stageGlobalOffset: Int,
- outToSlot: Array[Int]): Unit = {
+ private def wireOutlets(
+ islandTracking: IslandTracking,
+ mod: StreamLayout.AtomicModule[Shape, Any],
+ logic: Any,
+ stageGlobalOffset: Int,
+ outToSlot: Array[Int]): Unit = {
val outlets = mod.shape.outlets
if (outlets.nonEmpty) {
if (Shape.hasOnePort(outlets)) {
@@ -592,10 +611,11 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
* INTERNAL API
*/
@DoNotInherit private[akka] trait Phase[M] {
- def apply(settings: ActorMaterializerSettings,
- effectiveAttributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String): PhaseIsland[M]
+ def apply(
+ settings: ActorMaterializerSettings,
+ effectiveAttributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String): PhaseIsland[M]
}
/**
@@ -627,11 +647,12 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class GraphStageIsland(settings: ActorMaterializerSettings,
- effectiveAttributes: Attributes,
- materializer: PhasedFusingActorMaterializer,
- islandName: String,
- subflowFuser: OptionVal[GraphInterpreterShell => ActorRef])
+@InternalApi private[akka] final class GraphStageIsland(
+ settings: ActorMaterializerSettings,
+ effectiveAttributes: Attributes,
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String,
+ subflowFuser: OptionVal[GraphInterpreterShell => ActorRef])
extends PhaseIsland[GraphStageLogic] {
// TODO: remove these
private val logicArrayType = Array.empty[GraphStageLogic]
@@ -802,8 +823,9 @@ private final case class SavedIslandData(islandGlobalOffset: Int,
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class SourceModulePhase(materializer: PhasedFusingActorMaterializer,
- islandName: String)
+@InternalApi private[akka] final class SourceModulePhase(
+ materializer: PhasedFusingActorMaterializer,
+ islandName: String)
extends PhaseIsland[Publisher[Any]] {
override def name: String = s"SourceModule phase"
diff --git a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala
index 95cd63658f..1990ee4a18 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala
@@ -66,18 +66,21 @@ import scala.concurrent.{ Future, Promise }
} else
overflowStrategy match {
case s: DropHead =>
- log.log(s.logLevel,
- "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]")
+ log.log(
+ s.logLevel,
+ "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]")
buffer.dropHead()
enqueueAndSuccess(offer)
case s: DropTail =>
- log.log(s.logLevel,
- "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]")
+ log.log(
+ s.logLevel,
+ "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]")
buffer.dropTail()
enqueueAndSuccess(offer)
case s: DropBuffer =>
- log.log(s.logLevel,
- "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]")
+ log.log(
+ s.logLevel,
+ "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]")
buffer.clear()
enqueueAndSuccess(offer)
case s: DropNew =>
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala
index 7910aa39c9..2e6479ec95 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala
@@ -15,13 +15,16 @@ import akka.annotation.InternalApi
* Contrary to many other ring buffer implementations this one does not automatically overwrite the oldest
* elements, rather, if full, the buffer tries to grow and rejects further writes if max capacity is reached.
*/
-@InternalApi private[akka] class ResizableMultiReaderRingBuffer[T](initialSize: Int, // constructor param, not field
- maxSize: Int, // constructor param, not field
- val cursors: Cursors) {
- require(Integer.lowestOneBit(maxSize) == maxSize && 0 < maxSize && maxSize <= Int.MaxValue / 2,
- "maxSize must be a power of 2 that is > 0 and < Int.MaxValue/2")
- require(Integer.lowestOneBit(initialSize) == initialSize && 0 < initialSize && initialSize <= maxSize,
- "initialSize must be a power of 2 that is > 0 and <= maxSize")
+@InternalApi private[akka] class ResizableMultiReaderRingBuffer[T](
+ initialSize: Int, // constructor param, not field
+ maxSize: Int, // constructor param, not field
+ val cursors: Cursors) {
+ require(
+ Integer.lowestOneBit(maxSize) == maxSize && 0 < maxSize && maxSize <= Int.MaxValue / 2,
+ "maxSize must be a power of 2 that is > 0 and < Int.MaxValue/2")
+ require(
+ Integer.lowestOneBit(initialSize) == initialSize && 0 < initialSize && initialSize <= maxSize,
+ "initialSize must be a power of 2 that is > 0 and <= maxSize")
private[this] val maxSizeBit = Integer.numberOfTrailingZeros(maxSize)
private[this] var array = new Array[Any](initialSize)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala
index 1acc0deec8..1155c59aa4 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala
@@ -122,9 +122,10 @@ import akka.util.ccompat._
* INTERNAL API
* Attaches a subscriber to this stream.
*/
-@InternalApi private[akka] final class SubscriberSink[In](subscriber: Subscriber[In],
- val attributes: Attributes,
- shape: SinkShape[In])
+@InternalApi private[akka] final class SubscriberSink[In](
+ subscriber: Subscriber[In],
+ val attributes: Attributes,
+ shape: SinkShape[In])
extends SinkModule[In, NotUsed](shape) {
override def create(context: MaterializationContext) = (subscriber, NotUsed)
@@ -153,9 +154,10 @@ import akka.util.ccompat._
* Creates and wraps an actor into [[org.reactivestreams.Subscriber]] from the given `props`,
* which should be [[akka.actor.Props]] for an [[akka.stream.actor.ActorSubscriber]].
*/
-@InternalApi private[akka] final class ActorSubscriberSink[In](props: Props,
- val attributes: Attributes,
- shape: SinkShape[In])
+@InternalApi private[akka] final class ActorSubscriberSink[In](
+ props: Props,
+ val attributes: Attributes,
+ shape: SinkShape[In])
extends SinkModule[In, ActorRef](shape) {
override def create(context: MaterializationContext) = {
@@ -172,11 +174,12 @@ import akka.util.ccompat._
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class ActorRefSink[In](ref: ActorRef,
- onCompleteMessage: Any,
- onFailureMessage: Throwable => Any,
- val attributes: Attributes,
- shape: SinkShape[In])
+@InternalApi private[akka] final class ActorRefSink[In](
+ ref: ActorRef,
+ onCompleteMessage: Any,
+ onFailureMessage: Throwable => Any,
+ val attributes: Attributes,
+ shape: SinkShape[In])
extends SinkModule[In, NotUsed](shape) {
override def create(context: MaterializationContext) = {
@@ -570,23 +573,24 @@ import akka.util.ccompat._
// The stage must not be shut down automatically; it is completed when maybeCompleteStage decides
setKeepGoing(true)
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = {
- subOutlet.push(grab(in))
- }
- override def onUpstreamFinish(): Unit = {
- if (firstElementPushed) {
- subOutlet.complete()
- maybeCompleteStage()
- }
- }
- override def onUpstreamFailure(ex: Throwable): Unit = {
- // propagate exception irrespective if the cached element has been pushed or not
- subOutlet.fail(ex)
- maybeCompleteStage()
- }
- })
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush(): Unit = {
+ subOutlet.push(grab(in))
+ }
+ override def onUpstreamFinish(): Unit = {
+ if (firstElementPushed) {
+ subOutlet.complete()
+ maybeCompleteStage()
+ }
+ }
+ override def onUpstreamFailure(ex: Throwable): Unit = {
+ // propagate exception irrespective if the cached element has been pushed or not
+ subOutlet.fail(ex)
+ maybeCompleteStage()
+ }
+ })
subOutlet.setHandler(new OutHandler {
override def onPull(): Unit = {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala
index 55b8e7fe5c..e3cc2e3bff 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala
@@ -47,9 +47,10 @@ import scala.util.control.NonFatal
}
final case class Both(subscriber: Subscriber[Any]) extends HasActualSubscriber
- final case class Establishing(subscriber: Subscriber[Any],
- onCompleteBuffered: Boolean = false,
- onErrorBuffered: OptionVal[Throwable] = OptionVal.None)
+ final case class Establishing(
+ subscriber: Subscriber[Any],
+ onCompleteBuffered: Boolean = false,
+ onErrorBuffered: OptionVal[Throwable] = OptionVal.None)
extends HasActualSubscriber
object Establishing {
def create(s: Subscriber[_]) = Establishing(s.asInstanceOf[Subscriber[Any]])
diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala
index c64f989fa6..b6e41ca651 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala
@@ -92,10 +92,11 @@ import scala.util.control.NoStackTrace
case p: Publisher[_] =>
log.debug("Cancelling {} (after: {} ms)", p, millis)
- handleSubscriptionTimeout(target,
- new SubscriptionTimeoutException(
- s"Publisher ($p) you are trying to subscribe to has been shut-down " +
- s"because exceeding it's subscription-timeout.") with NoStackTrace)
+ handleSubscriptionTimeout(
+ target,
+ new SubscriptionTimeoutException(
+ s"Publisher ($p) you are trying to subscribe to has been shut-down " +
+ s"because exceeding it's subscription-timeout.") with NoStackTrace)
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala
index b80593bd5f..6b6acbd3ca 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala
@@ -23,9 +23,10 @@ import language.higherKinds
/**
* INTERNAL API
*/
-@InternalApi private[akka] class SubFlowImpl[In, Out, Mat, F[+ _], C](val subFlow: Flow[In, Out, NotUsed],
- mergeBackFunction: SubFlowImpl.MergeBack[In, F],
- finishFunction: Sink[In, NotUsed] => C)
+@InternalApi private[akka] class SubFlowImpl[In, Out, Mat, F[+ _], C](
+ val subFlow: Flow[In, Out, NotUsed],
+ mergeBackFunction: SubFlowImpl.MergeBack[In, F],
+ finishFunction: Sink[In, NotUsed] => C)
extends SubFlow[Out, Mat, F, C] {
override def via[T, Mat2](flow: Graph[FlowShape[Out, T], Mat2]): Repr[T] =
diff --git a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala
index a438ccfb27..eef9c77b96 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala
@@ -23,11 +23,12 @@ import scala.concurrent.duration.{ FiniteDuration, _ }
/**
* INTERNAL API
*/
-@InternalApi private[akka] class Throttle[T](val cost: Int,
- val per: FiniteDuration,
- val maximumBurst: Int,
- val costCalculation: (T) => Int,
- val mode: ThrottleMode)
+@InternalApi private[akka] class Throttle[T](
+ val cost: Int,
+ val per: FiniteDuration,
+ val maximumBurst: Int,
+ val costCalculation: (T) => Int,
+ val mode: ThrottleMode)
extends SimpleLinearGraphStage[T] {
require(cost > 0, "cost must be > 0")
require(per.toNanos > 0, "per time must be > 0")
diff --git a/akka-stream/src/main/scala/akka/stream/impl/Timers.scala b/akka-stream/src/main/scala/akka/stream/impl/Timers.scala
index ac95c69a07..12ebedb097 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/Timers.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/Timers.scala
@@ -35,8 +35,9 @@ import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.concurrent.duration._
if (timeout > 1.second) 1.second
else {
- FiniteDuration(math.min(math.max(timeout.toNanos / 8, 100.millis.toNanos), timeout.toNanos / 2),
- TimeUnit.NANOSECONDS)
+ FiniteDuration(
+ math.min(math.max(timeout.toNanos / 8, 100.millis.toNanos), timeout.toNanos / 2),
+ TimeUnit.NANOSECONDS)
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala b/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala
index 13326f3d44..b71b20c4df 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala
@@ -264,10 +264,11 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
Attributes.none)
b
} else {
- AtomicTraversalBuilder(module,
- new Array[Int](module.shape.outlets.size),
- module.shape.outlets.size,
- Attributes.none)
+ AtomicTraversalBuilder(
+ module,
+ new Array[Int](module.shape.outlets.size),
+ module.shape.outlets.size,
+ Attributes.none)
}
// important to use setAttributes because it will create island for async (dispatcher attribute)
builder.setAttributes(attributes)
@@ -484,20 +485,22 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
* Returned by [[CompositeTraversalBuilder]] once all output ports of a subgraph has been wired.
* See comments in akka.stream.impl.package for more details.
*/
-@InternalApi private[akka] final case class CompletedTraversalBuilder(traversalSoFar: Traversal,
- inSlots: Int,
- inToOffset: Map[InPort, Int],
- attributes: Attributes,
- islandTag: OptionVal[IslandTag] = OptionVal.None)
+@InternalApi private[akka] final case class CompletedTraversalBuilder(
+ traversalSoFar: Traversal,
+ inSlots: Int,
+ inToOffset: Map[InPort, Int],
+ attributes: Attributes,
+ islandTag: OptionVal[IslandTag] = OptionVal.None)
extends TraversalBuilder {
override def add(submodule: TraversalBuilder, shape: Shape, combineMat: AnyFunction2): TraversalBuilder = {
val key = new BuilderKey
- CompositeTraversalBuilder(reverseBuildSteps = key :: Nil,
- inSlots = inSlots,
- inOffsets = inToOffset,
- pendingBuilders = Map(key -> this),
- attributes = attributes).add(submodule, shape, combineMat)
+ CompositeTraversalBuilder(
+ reverseBuildSteps = key :: Nil,
+ inSlots = inSlots,
+ inOffsets = inToOffset,
+ pendingBuilders = Map(key -> this),
+ attributes = attributes).add(submodule, shape, combineMat)
}
override def traversal: Traversal = {
@@ -549,10 +552,11 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
* outToSlot array which will be then embedded in a [[MaterializeAtomic]] Traversal step.
* See comments in akka.stream.impl.package for more details.
*/
-@InternalApi private[akka] final case class AtomicTraversalBuilder(module: AtomicModule[Shape, Any],
- outToSlot: Array[Int],
- unwiredOuts: Int,
- attributes: Attributes)
+@InternalApi private[akka] final case class AtomicTraversalBuilder(
+ module: AtomicModule[Shape, Any],
+ outToSlot: Array[Int],
+ unwiredOuts: Int,
+ attributes: Attributes)
extends TraversalBuilder {
override def add(submodule: TraversalBuilder, shape: Shape, combineMat: AnyFunction2): TraversalBuilder = {
@@ -592,10 +596,11 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
else if (Shape.hasOnePort(inlets)) new Map1(inlets.head, inlets.head.id)
else inlets.iterator.map(in => in.asInstanceOf[InPort] -> in.id).toMap
}
- CompletedTraversalBuilder(traversalSoFar = MaterializeAtomic(module, newOutToSlot),
- inSlots,
- inToOffset,
- attributes)
+ CompletedTraversalBuilder(
+ traversalSoFar = MaterializeAtomic(module, newOutToSlot),
+ inSlots,
+ inToOffset,
+ attributes)
} else copy(outToSlot = newOutToSlot, unwiredOuts = newUnwiredOuts)
}
@@ -621,14 +626,15 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
def empty(attributes: Attributes = Attributes.none): LinearTraversalBuilder =
if (attributes eq Attributes.none) cachedEmptyLinear
else
- LinearTraversalBuilder(OptionVal.None,
- OptionVal.None,
- 0,
- 0,
- PushNotUsed,
- OptionVal.None,
- attributes,
- EmptyTraversal)
+ LinearTraversalBuilder(
+ OptionVal.None,
+ OptionVal.None,
+ 0,
+ 0,
+ PushNotUsed,
+ OptionVal.None,
+ attributes,
+ EmptyTraversal)
/**
* Create a traversal builder specialized for linear graphs. This is designed to be much faster and lightweight
@@ -646,13 +652,14 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
val wiring = if (outPortOpt.isDefined) wireBackward else noWire
- LinearTraversalBuilder(inPortOpt,
- outPortOpt,
- inOffset = 0,
- if (inPortOpt.isDefined) 1 else 0,
- traversalSoFar = MaterializeAtomic(module, wiring),
- pendingBuilder = OptionVal.None,
- attributes)
+ LinearTraversalBuilder(
+ inPortOpt,
+ outPortOpt,
+ inOffset = 0,
+ if (inPortOpt.isDefined) 1 else 0,
+ traversalSoFar = MaterializeAtomic(module, wiring),
+ pendingBuilder = OptionVal.None,
+ attributes)
}
def addMatCompose(t: Traversal, matCompose: AnyFunction2): Traversal = {
@@ -666,9 +673,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
t.concat(Compose(matCompose, reverse = true))
}
- def fromBuilder(traversalBuilder: TraversalBuilder,
- shape: Shape,
- combine: AnyFunction2 = Keep.right): LinearTraversalBuilder = {
+ def fromBuilder(
+ traversalBuilder: TraversalBuilder,
+ shape: Shape,
+ combine: AnyFunction2 = Keep.right): LinearTraversalBuilder = {
traversalBuilder match {
case linear: LinearTraversalBuilder =>
if (combine eq Keep.right) linear
@@ -681,13 +689,14 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
case OptionVal.None => 0
}
- LinearTraversalBuilder(inPort = OptionVal(inOpt.orNull),
- outPort = OptionVal.None,
- inOffset = inOffs,
- inSlots = completed.inSlots,
- completed.traversal.concat(addMatCompose(PushNotUsed, combine)),
- pendingBuilder = OptionVal.None,
- Attributes.none)
+ LinearTraversalBuilder(
+ inPort = OptionVal(inOpt.orNull),
+ outPort = OptionVal.None,
+ inOffset = inOffs,
+ inSlots = completed.inSlots,
+ completed.traversal.concat(addMatCompose(PushNotUsed, combine)),
+ pendingBuilder = OptionVal.None,
+ Attributes.none)
case composite =>
val inOpt = OptionVal(shape.inlets.headOption.orNull)
@@ -697,14 +706,15 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
case OptionVal.None => 0
}
- LinearTraversalBuilder(inPort = OptionVal(inOpt.orNull),
- outPort = OptionVal.Some(out),
- inOffset = inOffs,
- inSlots = composite.inSlots,
- addMatCompose(PushNotUsed, combine),
- pendingBuilder = OptionVal.Some(composite),
- Attributes.none,
- beforeBuilder = EmptyTraversal)
+ LinearTraversalBuilder(
+ inPort = OptionVal(inOpt.orNull),
+ outPort = OptionVal.Some(out),
+ inOffset = inOffs,
+ inSlots = composite.inSlots,
+ addMatCompose(PushNotUsed, combine),
+ pendingBuilder = OptionVal.Some(composite),
+ Attributes.none,
+ beforeBuilder = EmptyTraversal)
}
}
@@ -721,15 +731,16 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
* -1 relative offset to something else (see rewireLastOutTo).
* See comments in akka.stream.impl.package for more details.
*/
-@InternalApi private[akka] final case class LinearTraversalBuilder(inPort: OptionVal[InPort],
- outPort: OptionVal[OutPort],
- inOffset: Int,
- override val inSlots: Int,
- traversalSoFar: Traversal,
- pendingBuilder: OptionVal[TraversalBuilder],
- attributes: Attributes,
- beforeBuilder: Traversal = EmptyTraversal,
- islandTag: OptionVal[IslandTag] = OptionVal.None)
+@InternalApi private[akka] final case class LinearTraversalBuilder(
+ inPort: OptionVal[InPort],
+ outPort: OptionVal[OutPort],
+ inOffset: Int,
+ override val inSlots: Int,
+ traversalSoFar: Traversal,
+ pendingBuilder: OptionVal[TraversalBuilder],
+ attributes: Attributes,
+ beforeBuilder: Traversal = EmptyTraversal,
+ islandTag: OptionVal[IslandTag] = OptionVal.None)
extends TraversalBuilder {
protected def isEmpty: Boolean = inSlots == 0 && outPort.isEmpty
@@ -787,18 +798,20 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
if (outPort.contains(out) && inPort.contains(in)) {
pendingBuilder match {
case OptionVal.Some(composite) =>
- copy(inPort = OptionVal.None,
- outPort = OptionVal.None,
- traversalSoFar = applyIslandAndAttributes(
- beforeBuilder
- .concat(composite.assign(out, inOffset - composite.offsetOfModule(out)).traversal)
- .concat(traversalSoFar)),
- pendingBuilder = OptionVal.None,
- beforeBuilder = EmptyTraversal)
+ copy(
+ inPort = OptionVal.None,
+ outPort = OptionVal.None,
+ traversalSoFar = applyIslandAndAttributes(
+ beforeBuilder
+ .concat(composite.assign(out, inOffset - composite.offsetOfModule(out)).traversal)
+ .concat(traversalSoFar)),
+ pendingBuilder = OptionVal.None,
+ beforeBuilder = EmptyTraversal)
case OptionVal.None =>
- copy(inPort = OptionVal.None,
- outPort = OptionVal.None,
- traversalSoFar = rewireLastOutTo(traversalSoFar, inOffset))
+ copy(
+ inPort = OptionVal.None,
+ outPort = OptionVal.None,
+ traversalSoFar = rewireLastOutTo(traversalSoFar, inOffset))
}
} else
throw new IllegalArgumentException(s"The ports $in and $out cannot be accessed in this builder.")
@@ -827,11 +840,12 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
if (outPort.contains(out)) {
pendingBuilder match {
case OptionVal.Some(composite) =>
- copy(outPort = OptionVal.None,
- traversalSoFar = applyIslandAndAttributes(
- beforeBuilder.concat(composite.assign(out, relativeSlot).traversal.concat(traversalSoFar))),
- pendingBuilder = OptionVal.None,
- beforeBuilder = EmptyTraversal)
+ copy(
+ outPort = OptionVal.None,
+ traversalSoFar = applyIslandAndAttributes(
+ beforeBuilder.concat(composite.assign(out, relativeSlot).traversal.concat(traversalSoFar))),
+ pendingBuilder = OptionVal.None,
+ beforeBuilder = EmptyTraversal)
case OptionVal.None =>
copy(outPort = OptionVal.None, traversalSoFar = rewireLastOutTo(traversalSoFar, relativeSlot))
}
@@ -1000,19 +1014,18 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
* This is the simple case, when the other is purely linear. We just concatenate the traversals
* and do some bookkeeping.
*/
- LinearTraversalBuilder(inPort = inPort,
- outPort = toAppend.outPort,
- inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before
- // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_
- inOffset = inOffset + toAppend.inSlots,
- // Build in reverse so it yields a more efficient layout for left-to-right building
- traversalSoFar = toAppend
- .applyIslandAndAttributes(toAppend.traversalSoFar)
- .concat(finalTraversalForThis),
- pendingBuilder = OptionVal.None,
- attributes = Attributes.none, // attributes are none for the new enclosing builder
- beforeBuilder = EmptyTraversal, // no need for beforeBuilder as there are no composites
- islandTag = OptionVal.None // islandTag is reset for the new enclosing builder
+ LinearTraversalBuilder(
+ inPort = inPort,
+ outPort = toAppend.outPort,
+ inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before
+ // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_
+ inOffset = inOffset + toAppend.inSlots,
+ // Build in reverse so it yields a more efficient layout for left-to-right building
+ traversalSoFar = toAppend.applyIslandAndAttributes(toAppend.traversalSoFar).concat(finalTraversalForThis),
+ pendingBuilder = OptionVal.None,
+ attributes = Attributes.none, // attributes are none for the new enclosing builder
+ beforeBuilder = EmptyTraversal, // no need for beforeBuilder as there are no composites
+ islandTag = OptionVal.None // islandTag is reset for the new enclosing builder
)
case OptionVal.Some(_) =>
@@ -1053,19 +1066,20 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
// Finally add the already completed part of toAppend to newTraversalSoFar
newTraversalSoFar = toAppend.traversalSoFar.concat(newTraversalSoFar)
- LinearTraversalBuilder(inPort = inPort,
- outPort = toAppend.outPort,
- inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before
- // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_
- inOffset = inOffset + toAppend.inSlots,
- // Build in reverse so it yields a more efficient layout for left-to-right building. We cannot
- // apply the full traversal, only the completed part of it
- traversalSoFar = newTraversalSoFar,
- // Last composite of toAppend is still pending
- pendingBuilder = toAppend.pendingBuilder,
- attributes = Attributes.none, // attributes are none for the new enclosing builder
- beforeBuilder = newBeforeTraversal, // no need for beforeBuilder as there are no composites
- islandTag = OptionVal.None // islandTag is reset for the new enclosing builder
+ LinearTraversalBuilder(
+ inPort = inPort,
+ outPort = toAppend.outPort,
+ inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before
+ // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_
+ inOffset = inOffset + toAppend.inSlots,
+ // Build in reverse so it yields a more efficient layout for left-to-right building. We cannot
+ // apply the full traversal, only the completed part of it
+ traversalSoFar = newTraversalSoFar,
+ // Last composite of toAppend is still pending
+ pendingBuilder = toAppend.pendingBuilder,
+ attributes = Attributes.none, // attributes are none for the new enclosing builder
+ beforeBuilder = newBeforeTraversal, // beforeBuilder must be kept: the last composite of toAppend is still pending
+ islandTag = OptionVal.None // islandTag is reset for the new enclosing builder
)
}
} else throw new Exception("should this happen?")
@@ -1218,17 +1232,19 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
// final traversal (remember, input ports are assigned in traversal order of modules, and the inOffsets
// and inBaseOffseForOut Maps are updated when adding a module; we must respect addition order).
- copy(inBaseOffsetForOut = inBaseOffsetForOut - out,
- outOwners = outOwners - out,
- // TODO Optimize Map access
- pendingBuilders = pendingBuilders.updated(builderKey, result),
- // pendingBuilders = pendingBuilders - builderKey,
- unwiredOuts = unwiredOuts - 1)
+ copy(
+ inBaseOffsetForOut = inBaseOffsetForOut - out,
+ outOwners = outOwners - out,
+ // TODO Optimize Map access
+ pendingBuilders = pendingBuilders.updated(builderKey, result),
+ // pendingBuilders = pendingBuilders - builderKey,
+ unwiredOuts = unwiredOuts - 1)
} else {
// Update structures with result
- copy(inBaseOffsetForOut = inBaseOffsetForOut - out,
- unwiredOuts = unwiredOuts - 1,
- pendingBuilders = pendingBuilders.updated(builderKey, result))
+ copy(
+ inBaseOffsetForOut = inBaseOffsetForOut - out,
+ unwiredOuts = unwiredOuts - 1,
+ pendingBuilders = pendingBuilders.updated(builderKey, result))
}
// If we have no more unconnected outputs, we can finally build the Traversal and shed most of the auxiliary data.
@@ -1274,10 +1290,11 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
newInOffsets = newInOffsets.updated(in, inSlots + submodule.offsetOf(in.mappedTo))
}
- copy(reverseBuildSteps = newBuildSteps,
- inSlots = inSlots + submodule.inSlots,
- pendingBuilders = pendingBuilders.updated(builderKey, submodule),
- inOffsets = newInOffsets)
+ copy(
+ reverseBuildSteps = newBuildSteps,
+ inSlots = inSlots + submodule.inSlots,
+ pendingBuilders = pendingBuilders.updated(builderKey, submodule),
+ inOffsets = newInOffsets)
} else {
// Added module have unwired outputs.
@@ -1304,13 +1321,14 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
newOutOwners = newOutOwners.updated(out, builderKey)
}
- copy(reverseBuildSteps = newBuildSteps,
- inSlots = inSlots + submodule.inSlots,
- inOffsets = newInOffsets,
- inBaseOffsetForOut = newBaseOffsetsForOut,
- outOwners = newOutOwners,
- pendingBuilders = pendingBuilders.updated(builderKey, submodule),
- unwiredOuts = unwiredOuts + submodule.unwiredOuts)
+ copy(
+ reverseBuildSteps = newBuildSteps,
+ inSlots = inSlots + submodule.inSlots,
+ inOffsets = newInOffsets,
+ inBaseOffsetForOut = newBaseOffsetsForOut,
+ outOwners = newOutOwners,
+ pendingBuilders = pendingBuilders.updated(builderKey, submodule),
+ unwiredOuts = unwiredOuts + submodule.unwiredOuts)
}
added.completeIfPossible
diff --git a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala
index 6c2f2958d7..e3974ac951 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala
@@ -16,9 +16,10 @@ import scala.util.control.NonFatal
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class UnfoldResourceSource[T, S](create: () => S,
- readData: (S) => Option[T],
- close: (S) => Unit)
+@InternalApi private[akka] final class UnfoldResourceSource[T, S](
+ create: () => S,
+ readData: (S) => Option[T],
+ close: (S) => Unit)
extends GraphStage[SourceShape[T]] {
val out = Outlet[T]("UnfoldResourceSource.out")
override val shape = SourceShape(out)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala
index 8411999d6a..14fcbbb5a9 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala
@@ -19,9 +19,10 @@ import scala.util.control.NonFatal
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class UnfoldResourceSourceAsync[T, S](create: () => Future[S],
- readData: (S) => Future[Option[T]],
- close: (S) => Future[Done])
+@InternalApi private[akka] final class UnfoldResourceSourceAsync[T, S](
+ create: () => Future[S],
+ readData: (S) => Future[Option[T]],
+ close: (S) => Future[Done])
extends GraphStage[SourceShape[T]] {
val out = Outlet[T]("UnfoldResourceSourceAsync.out")
override val shape = SourceShape(out)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala
index 62078d4c9a..419c1c7057 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala
@@ -55,10 +55,11 @@ import scala.util.control.NonFatal
def props(shell: GraphInterpreterShell): Props =
Props(new ActorGraphInterpreter(shell)).withDeploy(Deploy.local)
- class BatchingActorInputBoundary(size: Int,
- shell: GraphInterpreterShell,
- publisher: Publisher[Any],
- internalPortName: String)
+ class BatchingActorInputBoundary(
+ size: Int,
+ shell: GraphInterpreterShell,
+ publisher: Publisher[Any],
+ internalPortName: String)
extends UpstreamBoundaryStageLogic[Any]
with OutHandler {
@@ -447,11 +448,12 @@ import scala.util.control.NonFatal
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class GraphInterpreterShell(var connections: Array[Connection],
- var logics: Array[GraphStageLogic],
- settings: ActorMaterializerSettings,
- attributes: Attributes,
- val mat: ExtendedActorMaterializer) {
+@InternalApi private[akka] final class GraphInterpreterShell(
+ var connections: Array[Connection],
+ var logics: Array[GraphStageLogic],
+ settings: ActorMaterializerSettings,
+ attributes: Attributes,
+ val mat: ExtendedActorMaterializer) {
import ActorGraphInterpreter._
@@ -462,11 +464,12 @@ import scala.util.control.NonFatal
* @param promise Will be completed upon processing the event, or failed if processing the event throws
* if the event isn't ever processed the promise (the operator stops) is failed elsewhere
*/
- final case class AsyncInput(shell: GraphInterpreterShell,
- logic: GraphStageLogic,
- evt: Any,
- promise: Promise[Done],
- handler: (Any) => Unit)
+ final case class AsyncInput(
+ shell: GraphInterpreterShell,
+ logic: GraphStageLogic,
+ evt: Any,
+ promise: Promise[Done],
+ handler: (Any) => Unit)
extends BoundaryEvent {
override def execute(eventLimit: Int): Int = {
if (!waitingForShutdown) {
@@ -539,10 +542,11 @@ import scala.util.control.NonFatal
private var resumeScheduled = false
def isInitialized: Boolean = self != null
- def init(self: ActorRef,
- subMat: SubFusingActorMaterializerImpl,
- enqueueToShortCircuit: (Any) => Unit,
- eventLimit: Int): Int = {
+ def init(
+ self: ActorRef,
+ subMat: SubFusingActorMaterializerImpl,
+ enqueueToShortCircuit: (Any) => Unit,
+ eventLimit: Int): Int = {
this.self = self
this.enqueueToShortCircuit = enqueueToShortCircuit
var i = 0
@@ -758,11 +762,10 @@ import scala.util.control.NonFatal
if (shortCircuitBuffer != null) shortCircuitBatch()
case Snapshot =>
- sender() ! StreamSnapshotImpl(self.path,
- activeInterpreters
- .map(shell => shell.toSnapshot.asInstanceOf[RunningInterpreter])
- .toSeq,
- newShells.map(shell => shell.toSnapshot.asInstanceOf[UninitializedInterpreter]))
+ sender() ! StreamSnapshotImpl(
+ self.path,
+ activeInterpreters.map(shell => shell.toSnapshot.asInstanceOf[RunningInterpreter]).toSeq,
+ newShells.map(shell => shell.toSnapshot.asInstanceOf[UninitializedInterpreter]))
}
override def postStop(): Unit = {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala
index d819777bbb..e17da07184 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala
@@ -78,11 +78,12 @@ import akka.stream.snapshot._
* @param inHandler The handler that contains the callback for input events.
* @param outHandler The handler that contains the callback for output events.
*/
- final class Connection(var id: Int,
- var inOwner: GraphStageLogic,
- var outOwner: GraphStageLogic,
- var inHandler: InHandler,
- var outHandler: OutHandler) {
+ final class Connection(
+ var id: Int,
+ var inOwner: GraphStageLogic,
+ var outOwner: GraphStageLogic,
+ var inHandler: InHandler,
+ var outHandler: OutHandler) {
var portState: Int = InReady
var slot: Any = Empty
}
@@ -662,22 +663,24 @@ import akka.stream.snapshot._
}
val logicIndexes = logics.zipWithIndex.map { case (stage, idx) => stage -> idx }.toMap
val connectionSnapshots = connections.filter(_ != null).map { connection =>
- ConnectionSnapshotImpl(connection.id,
- logicSnapshots(logicIndexes(connection.inOwner)),
- logicSnapshots(logicIndexes(connection.outOwner)),
- connection.portState match {
- case InReady => ConnectionSnapshot.ShouldPull
- case OutReady => ConnectionSnapshot.ShouldPush
- case x if (x | InClosed | OutClosed) == (InClosed | OutClosed) =>
- ConnectionSnapshot.Closed
- })
+ ConnectionSnapshotImpl(
+ connection.id,
+ logicSnapshots(logicIndexes(connection.inOwner)),
+ logicSnapshots(logicIndexes(connection.outOwner)),
+ connection.portState match {
+ case InReady => ConnectionSnapshot.ShouldPull
+ case OutReady => ConnectionSnapshot.ShouldPush
+ case x if (x | InClosed | OutClosed) == (InClosed | OutClosed) =>
+ ConnectionSnapshot.Closed
+ })
}
- RunningInterpreterImpl(logicSnapshots.toVector,
- connectionSnapshots.toVector,
- queueStatus,
- runningStages,
- shutdownCounter.toList.map(n => logicSnapshots(n)))
+ RunningInterpreterImpl(
+ logicSnapshots.toVector,
+ connectionSnapshots.toVector,
+ queueStatus,
+ runningStages,
+ shutdownCounter.toList.map(n => logicSnapshots(n)))
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala
index f7f6361cb8..23f61a7ea8 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala
@@ -302,23 +302,23 @@ import scala.concurrent.{ Future, Promise }
}
// initial handler (until future completes)
- setHandler(out,
- new OutHandler {
- def onPull(): Unit = {}
+ setHandler(
+ out,
+ new OutHandler {
+ def onPull(): Unit = {}
- override def onDownstreamFinish(): Unit = {
- if (!materialized.isCompleted) {
- // we used to try to materialize the "inner" source here just to get
- // the materialized value, but that is not safe and may cause the graph shell
- // to leak/stay alive after the stage completes
+ override def onDownstreamFinish(): Unit = {
+ if (!materialized.isCompleted) {
+ // we used to try to materialize the "inner" source here just to get
+ // the materialized value, but that is not safe and may cause the graph shell
+ // to leak/stay alive after the stage completes
- materialized.tryFailure(
- new StreamDetachedException("Stream cancelled before Source Future completed"))
- }
+ materialized.tryFailure(new StreamDetachedException("Stream cancelled before Source Future completed"))
+ }
- super.onDownstreamFinish()
- }
- })
+ super.onDownstreamFinish()
+ }
+ })
def onPush(): Unit =
push(out, sinkIn.grab())
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala
index df5f8d2483..0f74918fab 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala
@@ -394,18 +394,19 @@ private[stream] object Collect {
}
})
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = ()
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush(): Unit = ()
- override def onUpstreamFinish(): Unit =
- setHandler(out, new OutHandler {
- override def onPull(): Unit = {
- push(out, aggregator)
- completeStage()
- }
- })
- })
+ override def onUpstreamFinish(): Unit =
+ setHandler(out, new OutHandler {
+ override def onPull(): Unit = {
+ push(out, aggregator)
+ completeStage()
+ }
+ })
+ })
override def onPull(): Unit = pull(in)
@@ -910,8 +911,9 @@ private[stream] object Collect {
case s: DropHead =>
elem =>
if (buffer.isFull) {
- log.log(s.logLevel,
- "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]")
+ log.log(
+ s.logLevel,
+ "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]")
buffer.dropHead()
}
buffer.enqueue(elem)
@@ -919,8 +921,9 @@ private[stream] object Collect {
case s: DropTail =>
elem =>
if (buffer.isFull) {
- log.log(s.logLevel,
- "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]")
+ log.log(
+ s.logLevel,
+ "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]")
buffer.dropTail()
}
buffer.enqueue(elem)
@@ -939,8 +942,9 @@ private[stream] object Collect {
elem =>
if (!buffer.isFull) buffer.enqueue(elem)
else
- log.log(s.logLevel,
- "Dropping the new element because buffer is full and overflowStrategy is: [DropNew]")
+ log.log(
+ s.logLevel,
+ "Dropping the new element because buffer is full and overflowStrategy is: [DropNew]")
pull(in)
case s: Backpressure =>
elem =>
@@ -997,10 +1001,11 @@ private[stream] object Collect {
/**
* INTERNAL API
*/
-@InternalApi private[akka] final case class Batch[In, Out](val max: Long,
- val costFn: In => Long,
- val seed: In => Out,
- val aggregate: (Out, In) => Out)
+@InternalApi private[akka] final case class Batch[In, Out](
+ val max: Long,
+ val costFn: In => Long,
+ val seed: In => Out,
+ val aggregate: (Out, In) => Out)
extends GraphStage[FlowShape[In, Out]] {
val in = Inlet[In]("Batch.in")
@@ -1479,11 +1484,12 @@ private[stream] object Collect {
logLevels.onFailure match {
case Logging.ErrorLevel => log.error(cause, "[{}] Upstream failed.", name)
case level =>
- log.log(level,
- "[{}] Upstream failed, cause: {}: {}",
- name,
- Logging.simpleName(cause.getClass),
- cause.getMessage)
+ log.log(
+ level,
+ "[{}] Upstream failed, cause: {}: {}",
+ name,
+ Logging.simpleName(cause.getClass),
+ cause.getMessage)
}
super.onUpstreamFailure(cause)
@@ -1558,9 +1564,10 @@ private[stream] object Collect {
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class GroupedWeightedWithin[T](val maxWeight: Long,
- costFn: T => Long,
- val interval: FiniteDuration)
+@InternalApi private[akka] final class GroupedWeightedWithin[T](
+ val maxWeight: Long,
+ costFn: T => Long,
+ val interval: FiniteDuration)
extends GraphStage[FlowShape[T, immutable.Seq[T]]] {
require(maxWeight > 0, "maxWeight must be greater than 0")
require(interval > Duration.Zero)
@@ -1936,8 +1943,9 @@ private[stream] object Collect {
*/
@InternalApi private[stream] object RecoverWith
-@InternalApi private[akka] final class RecoverWith[T, M](val maximumRetries: Int,
- val pf: PartialFunction[Throwable, Graph[SourceShape[T], M]])
+@InternalApi private[akka] final class RecoverWith[T, M](
+ val maximumRetries: Int,
+ val pf: PartialFunction[Throwable, Graph[SourceShape[T], M]])
extends SimpleLinearGraphStage[T] {
override def initialAttributes = DefaultAttributes.recoverWith
@@ -2175,23 +2183,24 @@ private[stream] object Collect {
// The stage must not be shut down automatically; it is completed when maybeCompleteStage decides
setKeepGoing(true)
- setHandler(in,
- new InHandler {
- override def onPush(): Unit = {
- subOutlet.push(grab(in))
- }
- override def onUpstreamFinish(): Unit = {
- if (firstElementPushed) {
- subOutlet.complete()
- maybeCompleteStage()
- }
- }
- override def onUpstreamFailure(ex: Throwable): Unit = {
- // propagate exception irrespective if the cached element has been pushed or not
- subOutlet.fail(ex)
- maybeCompleteStage()
- }
- })
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush(): Unit = {
+ subOutlet.push(grab(in))
+ }
+ override def onUpstreamFinish(): Unit = {
+ if (firstElementPushed) {
+ subOutlet.complete()
+ maybeCompleteStage()
+ }
+ }
+ override def onUpstreamFailure(ex: Throwable): Unit = {
+ // propagate exception irrespective if the cached element has been pushed or not
+ subOutlet.fail(ex)
+ maybeCompleteStage()
+ }
+ })
setHandler(out, new OutHandler {
override def onPull(): Unit = {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala
index 1723b0fd4e..a471f4cb50 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala
@@ -169,9 +169,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
case StreamSubscriptionTimeoutTerminationMode.NoopTermination =>
// do nothing
case StreamSubscriptionTimeoutTerminationMode.WarnTermination =>
- materializer.logger.warning("Substream subscription timeout triggered after {} in prefixAndTail({}).",
- timeout,
- n)
+ materializer.logger.warning(
+ "Substream subscription timeout triggered after {} in prefixAndTail({}).",
+ timeout,
+ n)
}
}
@@ -251,9 +252,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class GroupBy[T, K](val maxSubstreams: Int,
- val keyFor: T => K,
- val allowClosedSubstreamRecreation: Boolean = false)
+@InternalApi private[akka] final class GroupBy[T, K](
+ val maxSubstreams: Int,
+ val keyFor: T => K,
+ val allowClosedSubstreamRecreation: Boolean = false)
extends GraphStage[FlowShape[T, Source[T, NotUsed]]] {
val in: Inlet[T] = Inlet("GroupBy.in")
val out: Outlet[Source[T, NotUsed]] = Outlet("GroupBy.out")
@@ -456,21 +458,24 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
/** Splits after the current element. The current element will be the last element in the current substream. */
case object SplitAfter extends SplitDecision
- def when[T](p: T => Boolean,
- substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] =
+ def when[T](
+ p: T => Boolean,
+ substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] =
new Split(Split.SplitBefore, p, substreamCancelStrategy)
- def after[T](p: T => Boolean,
- substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] =
+ def after[T](
+ p: T => Boolean,
+ substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] =
new Split(Split.SplitAfter, p, substreamCancelStrategy)
}
/**
* INTERNAL API
*/
-@InternalApi private[akka] final class Split[T](val decision: Split.SplitDecision,
- val p: T => Boolean,
- val substreamCancelStrategy: SubstreamCancelStrategy)
+@InternalApi private[akka] final class Split[T](
+ val decision: Split.SplitDecision,
+ val p: T => Boolean,
+ val substreamCancelStrategy: SubstreamCancelStrategy)
extends GraphStage[FlowShape[T, Source[T, NotUsed]]] {
val in: Inlet[T] = Inlet("Split.in")
val out: Outlet[Source[T, NotUsed]] = Outlet("Split.out")
@@ -495,20 +500,21 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
timeout = ActorMaterializerHelper.downcast(interpreter.materializer).settings.subscriptionTimeoutSettings.timeout
}
- setHandler(out,
- new OutHandler {
- override def onPull(): Unit = {
- if (substreamSource eq null) {
- //can be already pulled from substream in case split after
- if (!hasBeenPulled(in)) pull(in)
- } else if (substreamWaitingToBePushed) pushSubstreamSource()
- }
+ setHandler(
+ out,
+ new OutHandler {
+ override def onPull(): Unit = {
+ if (substreamSource eq null) {
+ //can be already pulled from substream in case split after
+ if (!hasBeenPulled(in)) pull(in)
+ } else if (substreamWaitingToBePushed) pushSubstreamSource()
+ }
- override def onDownstreamFinish(): Unit = {
- // If the substream is already cancelled or it has not been handed out, we can go away
- if ((substreamSource eq null) || substreamWaitingToBePushed || substreamCancelled) completeStage()
- }
- })
+ override def onDownstreamFinish(): Unit = {
+ // If the substream is already cancelled or it has not been handed out, we can go away
+ if ((substreamSource eq null) || substreamWaitingToBePushed || substreamCancelled) completeStage()
+ }
+ })
val initInHandler = new InHandler {
override def onPush(): Unit = {
@@ -773,9 +779,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource
}
def timeout(d: FiniteDuration): Boolean =
- status.compareAndSet(null,
- ActorSubscriberMessage.OnError(
- new SubscriptionTimeoutException(s"Substream Source has not been materialized in $d")))
+ status.compareAndSet(
+ null,
+ ActorSubscriberMessage.OnError(
+ new SubscriptionTimeoutException(s"Substream Source has not been materialized in $d")))
override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with OutHandler {
setHandler(out, this)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala b/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala
index 831daad971..1353307b75 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala
@@ -20,11 +20,12 @@ import scala.util.{ Failure, Success, Try }
/** INTERNAL API */
@InternalApi private[akka] object FileSubscriber {
- def props(f: Path,
- completionPromise: Promise[IOResult],
- bufSize: Int,
- startPosition: Long,
- openOptions: Set[OpenOption]) = {
+ def props(
+ f: Path,
+ completionPromise: Promise[IOResult],
+ bufSize: Int,
+ startPosition: Long,
+ openOptions: Set[OpenOption]) = {
require(bufSize > 0, "buffer size must be > 0")
require(startPosition >= 0, s"startPosition must be >= 0 (was $startPosition)")
Props(classOf[FileSubscriber], f, completionPromise, bufSize, startPosition, openOptions).withDeploy(Deploy.local)
@@ -32,11 +33,12 @@ import scala.util.{ Failure, Success, Try }
}
/** INTERNAL API */
-@InternalApi private[akka] class FileSubscriber(f: Path,
- completionPromise: Promise[IOResult],
- bufSize: Int,
- startPosition: Long,
- openOptions: Set[OpenOption])
+@InternalApi private[akka] class FileSubscriber(
+ f: Path,
+ completionPromise: Promise[IOResult],
+ bufSize: Int,
+ startPosition: Long,
+ openOptions: Set[OpenOption])
extends akka.stream.actor.ActorSubscriber
with ActorLogging {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala b/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala
index 647d748ea3..a7a81e595f 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala
@@ -21,11 +21,12 @@ import scala.concurrent.{ Future, Promise }
* Creates simple synchronous Sink which writes all incoming elements to the given file
* (creating it before hand if necessary).
*/
-@InternalApi private[akka] final class FileSink(f: Path,
- startPosition: Long,
- options: immutable.Set[OpenOption],
- val attributes: Attributes,
- shape: SinkShape[ByteString])
+@InternalApi private[akka] final class FileSink(
+ f: Path,
+ startPosition: Long,
+ options: immutable.Set[OpenOption],
+ val attributes: Attributes,
+ shape: SinkShape[ByteString])
extends SinkModule[ByteString, Future[IOResult]](shape) {
override protected def label: String = s"FileSink($f, $options)"
@@ -54,10 +55,11 @@ import scala.concurrent.{ Future, Promise }
* INTERNAL API
* Creates simple synchronous Sink which writes all incoming elements to the output stream.
*/
-@InternalApi private[akka] final class OutputStreamSink(createOutput: () => OutputStream,
- val attributes: Attributes,
- shape: SinkShape[ByteString],
- autoFlush: Boolean)
+@InternalApi private[akka] final class OutputStreamSink(
+ createOutput: () => OutputStream,
+ val attributes: Attributes,
+ shape: SinkShape[ByteString],
+ autoFlush: Boolean)
extends SinkModule[ByteString, Future[IOResult]](shape) {
override def create(context: MaterializationContext) = {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala
index bc0c062a51..a704eb0928 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala
@@ -144,10 +144,11 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition:
* INTERNAL API
* Source backed by the given input stream.
*/
-@InternalApi private[akka] final class InputStreamSource(createInputStream: () => InputStream,
- chunkSize: Int,
- val attributes: Attributes,
- shape: SourceShape[ByteString])
+@InternalApi private[akka] final class InputStreamSource(
+ createInputStream: () => InputStream,
+ chunkSize: Int,
+ val attributes: Attributes,
+ shape: SourceShape[ByteString])
extends SourceModule[ByteString, Future[IOResult]](shape) {
override def create(context: MaterializationContext) = {
val materializer = ActorMaterializerHelper.downcast(context.materializer)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala
index ca06a19908..57bcc2231e 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala
@@ -29,9 +29,10 @@ import scala.util.{ Failure, Success }
}
/** INTERNAL API */
-@InternalApi private[akka] class InputStreamPublisher(is: InputStream,
- completionPromise: Promise[IOResult],
- chunkSize: Int)
+@InternalApi private[akka] class InputStreamPublisher(
+ is: InputStream,
+ completionPromise: Promise[IOResult],
+ chunkSize: Int)
extends akka.stream.actor.ActorPublisher[ByteString]
with ActorLogging {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala
index 3392978c70..1eeadc1dfe 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala
@@ -111,9 +111,10 @@ private[stream] object InputStreamSinkStage {
* INTERNAL API
* InputStreamAdapter that interacts with InputStreamSinkStage
*/
-@InternalApi private[akka] class InputStreamAdapter(sharedBuffer: BlockingQueue[StreamToAdapterMessage],
- sendToStage: (AdapterToStageMessage) => Unit,
- readTimeout: FiniteDuration)
+@InternalApi private[akka] class InputStreamAdapter(
+ sharedBuffer: BlockingQueue[StreamToAdapterMessage],
+ sendToStage: (AdapterToStageMessage) => Unit,
+ readTimeout: FiniteDuration)
extends InputStream {
var isInitialized = false
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala
index 7362a5c2dd..1e27b811d0 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala
@@ -66,9 +66,10 @@ final private[stream] class OutputStreamSourceStage(writeTimeout: FiniteDuration
}
}
-private[akka] class OutputStreamAdapter(unfulfilledDemand: Semaphore,
- sendToStage: AsyncCallback[AdapterToStageMessage],
- writeTimeout: FiniteDuration)
+private[akka] class OutputStreamAdapter(
+ unfulfilledDemand: Semaphore,
+ sendToStage: AsyncCallback[AdapterToStageMessage],
+ writeTimeout: FiniteDuration)
extends OutputStream {
@scala.throws(classOf[IOException])
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala
index 09b435b0f2..eb28d132ba 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala
@@ -26,10 +26,11 @@ import scala.util.{ Failure, Success }
}
/** INTERNAL API */
-@InternalApi private[akka] class OutputStreamSubscriber(os: OutputStream,
- completionPromise: Promise[IOResult],
- bufSize: Int,
- autoFlush: Boolean)
+@InternalApi private[akka] class OutputStreamSubscriber(
+ os: OutputStream,
+ completionPromise: Promise[IOResult],
+ bufSize: Int,
+ autoFlush: Boolean)
extends akka.stream.actor.ActorSubscriber
with ActorLogging {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala
index b5c666c1a9..670cdcd91a 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala
@@ -30,14 +30,15 @@ import scala.concurrent.{ Future, Promise }
/**
* INTERNAL API
*/
-@InternalApi private[stream] class ConnectionSourceStage(val tcpManager: ActorRef,
- val endpoint: InetSocketAddress,
- val backlog: Int,
- val options: immutable.Traversable[SocketOption],
- val halfClose: Boolean,
- val idleTimeout: Duration,
- val bindShutdownTimeout: FiniteDuration,
- val ioSettings: IOSettings)
+@InternalApi private[stream] class ConnectionSourceStage(
+ val tcpManager: ActorRef,
+ val endpoint: InetSocketAddress,
+ val backlog: Int,
+ val options: immutable.Traversable[SocketOption],
+ val halfClose: Boolean,
+ val idleTimeout: Duration,
+ val bindShutdownTimeout: FiniteDuration,
+ val ioSettings: IOSettings)
extends GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[StreamTcp.ServerBinding]] {
import ConnectionSourceStage._
@@ -121,11 +122,12 @@ import scala.concurrent.{ Future, Promise }
val tcpFlow =
Flow
.fromGraph(
- new IncomingConnectionStage(connection,
- connected.remoteAddress,
- halfClose,
- ioSettings,
- () => connectionFlowsAwaitingInitialization.decrementAndGet()))
+ new IncomingConnectionStage(
+ connection,
+ connected.remoteAddress,
+ halfClose,
+ ioSettings,
+ () => connectionFlowsAwaitingInitialization.decrementAndGet()))
.via(detacher[ByteString]) // must read ahead for proper completions
// FIXME: Previous code was wrong, must add new tests
@@ -185,11 +187,12 @@ private[stream] object ConnectionSourceStage {
def halfClose: Boolean
def ioSettings: IOSettings
}
- case class Outbound(manager: ActorRef,
- connectCmd: Connect,
- localAddressPromise: Promise[InetSocketAddress],
- halfClose: Boolean,
- ioSettings: IOSettings)
+ case class Outbound(
+ manager: ActorRef,
+ connectCmd: Connect,
+ localAddressPromise: Promise[InetSocketAddress],
+ halfClose: Boolean,
+ ioSettings: IOSettings)
extends TcpRole
case class Inbound(connection: ActorRef, halfClose: Boolean, ioSettings: IOSettings, registerCallback: () => Unit)
@@ -202,9 +205,10 @@ private[stream] object ConnectionSourceStage {
* to attach an extra, fused buffer to the end of this flow. Keeping this stage non-detached makes it much simpler and
* easier to maintain and understand.
*/
- class TcpStreamLogic(val shape: FlowShape[ByteString, ByteString],
- val role: TcpRole,
- remoteAddress: InetSocketAddress)
+ class TcpStreamLogic(
+ val shape: FlowShape[ByteString, ByteString],
+ val role: TcpRole,
+ remoteAddress: InetSocketAddress)
extends GraphStageLogic(shape) {
implicit def self: ActorRef = stageActor.ref
@@ -327,38 +331,39 @@ private[stream] object ConnectionSourceStage {
}
}
- setHandler(bytesIn,
- new InHandler {
- override def onPush(): Unit = {
- val elem = grab(bytesIn)
- ReactiveStreamsCompliance.requireNonNullElement(elem)
- if (writeInProgress) {
- writeBuffer = writeBuffer ++ elem
- } else {
- connection ! Write(writeBuffer ++ elem, WriteAck)
- writeInProgress = true
- writeBuffer = ByteString.empty
- }
- if (writeBuffer.size < writeBufferSize)
- pull(bytesIn)
+ setHandler(
+ bytesIn,
+ new InHandler {
+ override def onPush(): Unit = {
+ val elem = grab(bytesIn)
+ ReactiveStreamsCompliance.requireNonNullElement(elem)
+ if (writeInProgress) {
+ writeBuffer = writeBuffer ++ elem
+ } else {
+ connection ! Write(writeBuffer ++ elem, WriteAck)
+ writeInProgress = true
+ writeBuffer = ByteString.empty
+ }
+ if (writeBuffer.size < writeBufferSize)
+ pull(bytesIn)
- }
+ }
- override def onUpstreamFinish(): Unit =
- closeConnection()
+ override def onUpstreamFinish(): Unit =
+ closeConnection()
- override def onUpstreamFailure(ex: Throwable): Unit = {
- if (connection != null) {
- if (interpreter.log.isDebugEnabled) {
- val msg = "Aborting tcp connection to {} because of upstream failure: {}"
+ override def onUpstreamFailure(ex: Throwable): Unit = {
+ if (connection != null) {
+ if (interpreter.log.isDebugEnabled) {
+ val msg = "Aborting tcp connection to {} because of upstream failure: {}"
- if (ex.getStackTrace.isEmpty) interpreter.log.debug(msg, remoteAddress, ex)
- else interpreter.log.debug(msg + "\n{}", remoteAddress, ex, ex.getStackTrace.mkString("\n"))
- }
- connection ! Abort
- } else fail(ex)
- }
- })
+ if (ex.getStackTrace.isEmpty) interpreter.log.debug(msg, remoteAddress, ex)
+ else interpreter.log.debug(msg + "\n{}", remoteAddress, ex, ex.getStackTrace.mkString("\n"))
+ }
+ connection ! Abort
+ } else fail(ex)
+ }
+ })
/** Fail stage and report to localAddressPromise if still possible */
private def fail(ex: Throwable): Unit = {
@@ -382,11 +387,12 @@ private[stream] object ConnectionSourceStage {
/**
* INTERNAL API
*/
-@InternalApi private[akka] class IncomingConnectionStage(connection: ActorRef,
- remoteAddress: InetSocketAddress,
- halfClose: Boolean,
- ioSettings: IOSettings,
- registerCallback: () => Unit)
+@InternalApi private[akka] class IncomingConnectionStage(
+ connection: ActorRef,
+ remoteAddress: InetSocketAddress,
+ halfClose: Boolean,
+ ioSettings: IOSettings,
+ registerCallback: () => Unit)
extends GraphStage[FlowShape[ByteString, ByteString]] {
import TcpConnectionStage._
@@ -410,13 +416,14 @@ private[stream] object ConnectionSourceStage {
/**
* INTERNAL API
*/
-@InternalApi private[stream] class OutgoingConnectionStage(manager: ActorRef,
- remoteAddress: InetSocketAddress,
- localAddress: Option[InetSocketAddress] = None,
- options: immutable.Traversable[SocketOption] = Nil,
- halfClose: Boolean = true,
- connectTimeout: Duration = Duration.Inf,
- ioSettings: IOSettings)
+@InternalApi private[stream] class OutgoingConnectionStage(
+ manager: ActorRef,
+ remoteAddress: InetSocketAddress,
+ localAddress: Option[InetSocketAddress] = None,
+ options: immutable.Traversable[SocketOption] = Nil,
+ halfClose: Boolean = true,
+ connectTimeout: Duration = Duration.Inf,
+ ioSettings: IOSettings)
extends GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Future[StreamTcp.OutgoingConnection]] {
import TcpConnectionStage._
@@ -434,16 +441,20 @@ private[stream] object ConnectionSourceStage {
}
val localAddressPromise = Promise[InetSocketAddress]
- val logic = new TcpStreamLogic(shape,
- Outbound(manager,
- Connect(remoteAddress, localAddress, options, connTimeout, pullMode = true),
- localAddressPromise,
- halfClose,
- ioSettings),
- remoteAddress)
+ val logic = new TcpStreamLogic(
+ shape,
+ Outbound(
+ manager,
+ Connect(remoteAddress, localAddress, options, connTimeout, pullMode = true),
+ localAddressPromise,
+ halfClose,
+ ioSettings),
+ remoteAddress)
- (logic,
- localAddressPromise.future.map(OutgoingConnection(remoteAddress, _))(ExecutionContexts.sameThreadExecutionContext))
+ (
+ logic,
+ localAddressPromise.future.map(OutgoingConnection(remoteAddress, _))(
+ ExecutionContexts.sameThreadExecutionContext))
}
override def toString = s"TCP-to($remoteAddress)"
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala
index 0c341371f5..3728747b86 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala
@@ -69,9 +69,10 @@ import scala.annotation.tailrec
val MinBufferSize = 1024
@tailrec
- def drainDeflater(deflater: Deflater,
- buffer: Array[Byte],
- result: ByteStringBuilder = new ByteStringBuilder()): ByteString = {
+ def drainDeflater(
+ deflater: Deflater,
+ buffer: Array[Byte],
+ result: ByteStringBuilder = new ByteStringBuilder()): ByteString = {
val len = deflater.deflate(buffer)
if (len > 0) {
result ++= ByteString.fromArray(buffer, 0, len)
diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala
index f5ce7c2284..0196e3591b 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala
@@ -89,9 +89,10 @@ private[stream] final class SinkRefStageImpl[In] private[akka] (val initialPartn
initialPartnerRef match {
case OptionVal.Some(ref) =>
// this will set the `partnerRef`
- observeAndValidateSender(ref,
- "Illegal initialPartnerRef! This may be a bug, please report your " +
- "usage and complete stack trace on the issue tracker: https://github.com/akka/akka")
+ observeAndValidateSender(
+ ref,
+ "Illegal initialPartnerRef! This may be a bug, please report your " +
+ "usage and complete stack trace on the issue tracker: https://github.com/akka/akka")
tryPull()
case OptionVal.None =>
// only schedule timeout timer if partnerRef has not been resolved yet (i.e. if this instance of the Actor
@@ -99,9 +100,10 @@ private[stream] final class SinkRefStageImpl[In] private[akka] (val initialPartn
scheduleOnce(SubscriptionTimeoutTimerKey, subscriptionTimeout.timeout)
}
- log.debug("Created SinkRef, pointing to remote Sink receiver: {}, local worker: {}",
- initialPartnerRef,
- self.ref)
+ log.debug(
+ "Created SinkRef, pointing to remote Sink receiver: {}, local worker: {}",
+ initialPartnerRef,
+ self.ref)
promise.success(SourceRefImpl(self.ref))
}
@@ -127,9 +129,10 @@ private[stream] final class SinkRefStageImpl[In] private[akka] (val initialPartn
if (remoteCumulativeDemandReceived < d) {
remoteCumulativeDemandReceived = d
- log.debug("Received cumulative demand [{}], consumable demand: [{}]",
- StreamRefsProtocol.CumulativeDemand(d),
- remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed)
+ log.debug(
+ "Received cumulative demand [{}], consumable demand: [{}]",
+ StreamRefsProtocol.CumulativeDemand(d),
+ remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed)
}
tryPull()
diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala
index 29e7a301ec..89a578405e 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala
@@ -91,8 +91,9 @@ private[stream] final class SourceRefStageImpl[Out](val initialPartnerRef: Optio
self = getStageActor(initialReceive)
log.debug("[{}] Allocated receiver: {}", stageActorName, self.ref)
if (initialPartnerRef.isDefined) // this will set the partnerRef
- observeAndValidateSender(initialPartnerRef.get,
- "Illegal initialPartnerRef! This would be a bug in the SourceRef usage or impl.")
+ observeAndValidateSender(
+ initialPartnerRef.get,
+ "Illegal initialPartnerRef! This would be a bug in the SourceRef usage or impl.")
promise.success(SinkRefImpl(self.ref))
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala
index 1d242d1a18..123b01c211 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala
@@ -46,9 +46,10 @@ object BidiFlow {
* }}}
*
*/
- def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](flow1: Graph[FlowShape[I1, O1], M1],
- flow2: Graph[FlowShape[I2, O2], M2],
- combine: function.Function2[M1, M2, M]): BidiFlow[I1, O1, I2, O2, M] = {
+ def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](
+ flow1: Graph[FlowShape[I1, O1], M1],
+ flow2: Graph[FlowShape[I2, O2], M2],
+ combine: function.Function2[M1, M2, M]): BidiFlow[I1, O1, I2, O2, M] = {
new BidiFlow(scaladsl.BidiFlow.fromFlowsMat(flow1, flow2)(combinerToScala(combine)))
}
@@ -70,16 +71,18 @@ object BidiFlow {
* }}}
*
*/
- def fromFlows[I1, O1, I2, O2, M1, M2](flow1: Graph[FlowShape[I1, O1], M1],
- flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
+ def fromFlows[I1, O1, I2, O2, M1, M2](
+ flow1: Graph[FlowShape[I1, O1], M1],
+ flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
new BidiFlow(scaladsl.BidiFlow.fromFlows(flow1, flow2))
/**
* Create a BidiFlow where the top and bottom flows are just one simple mapping
* operator each, expressed by the two functions.
*/
- def fromFunctions[I1, O1, I2, O2](top: function.Function[I1, O1],
- bottom: function.Function[I2, O2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
+ def fromFunctions[I1, O1, I2, O2](
+ top: function.Function[I1, O1],
+ bottom: function.Function[I2, O2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
new BidiFlow(scaladsl.BidiFlow.fromFunctions(top.apply _, bottom.apply _))
/**
@@ -159,8 +162,9 @@ final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2
* The `combine` function is used to compose the materialized values of this flow and that
* flow into the materialized value of the resulting BidiFlow.
*/
- def atop[OO1, II2, Mat2, M](bidi: BidiFlow[O1, OO1, II2, I2, Mat2],
- combine: function.Function2[Mat, Mat2, M]): BidiFlow[I1, OO1, II2, O2, M] =
+ def atop[OO1, II2, Mat2, M](
+ bidi: BidiFlow[O1, OO1, II2, I2, Mat2],
+ combine: function.Function2[Mat, Mat2, M]): BidiFlow[I1, OO1, II2, O2, M] =
new BidiFlow(delegate.atopMat(bidi.asScala)(combinerToScala(combine)))
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala
index 218ec3cf3e..877b435bfe 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala
@@ -113,9 +113,10 @@ object FileIO {
* @param options File open options, see [[java.nio.file.StandardOpenOption]]
* @param startPosition startPosition the start position to read from, defaults to 0
*/
- def toPath[Opt <: OpenOption](f: Path,
- options: util.Set[Opt],
- startPosition: Long): javadsl.Sink[ByteString, CompletionStage[IOResult]] =
+ def toPath[Opt <: OpenOption](
+ f: Path,
+ options: util.Set[Opt],
+ startPosition: Long): javadsl.Sink[ByteString, CompletionStage[IOResult]] =
new Sink(scaladsl.FileIO.toPath(f, options.asScala.toSet, startPosition).toCompletionStage())
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala
index 653148caec..6fe6c12e76 100755
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala
@@ -112,9 +112,10 @@ object Flow {
* The `combine` function is used to compose the materialized values of the `sink` and `source`
* into the materialized value of the resulting [[Flow]].
*/
- def fromSinkAndSourceMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1],
- source: Graph[SourceShape[O], M2],
- combine: function.Function2[M1, M2, M]): Flow[I, O, M] =
+ def fromSinkAndSourceMat[I, O, M1, M2, M](
+ sink: Graph[SinkShape[I], M1],
+ source: Graph[SourceShape[O], M2],
+ combine: function.Function2[M1, M2, M]): Flow[I, O, M] =
new Flow(scaladsl.Flow.fromSinkAndSourceMat(sink, source)(combinerToScala(combine)))
/**
@@ -177,8 +178,9 @@ object Flow {
*
* See also [[fromSinkAndSourceCoupledMat]] when access to materialized values of the parameters is needed.
*/
- def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _],
- source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] =
+ def fromSinkAndSourceCoupled[I, O](
+ sink: Graph[SinkShape[I], _],
+ source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] =
new Flow(scaladsl.Flow.fromSinkAndSourceCoupled(sink, source))
/**
@@ -205,9 +207,10 @@ object Flow {
* The `combine` function is used to compose the materialized values of the `sink` and `source`
* into the materialized value of the resulting [[Flow]].
*/
- def fromSinkAndSourceCoupledMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1],
- source: Graph[SourceShape[O], M2],
- combine: function.Function2[M1, M2, M]): Flow[I, O, M] =
+ def fromSinkAndSourceCoupledMat[I, O, M1, M2, M](
+ sink: Graph[SinkShape[I], M1],
+ source: Graph[SourceShape[O], M2],
+ combine: function.Function2[M1, M2, M]): Flow[I, O, M] =
new Flow(scaladsl.Flow.fromSinkAndSourceCoupledMat(sink, source)(combinerToScala(combine)))
/**
@@ -225,10 +228,12 @@ object Flow {
* '''Cancels when''' downstream cancels
*/
@Deprecated
- @deprecated("Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)",
- "2.5.12")
- def lazyInit[I, O, M](flowFactory: function.Function[I, CompletionStage[Flow[I, O, M]]],
- fallback: function.Creator[M]): Flow[I, O, M] = {
+ @deprecated(
+ "Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)",
+ "2.5.12")
+ def lazyInit[I, O, M](
+ flowFactory: function.Function[I, CompletionStage[Flow[I, O, M]]],
+ fallback: function.Creator[M]): Flow[I, O, M] = {
import scala.compat.java8.FutureConverters._
val sflow = scaladsl.Flow
.fromGraph(new LazyFlow[I, O, M](t =>
@@ -339,8 +344,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
* where appropriate instead of manually writing functions that pass through one of the values.
*/
- def viaMat[T, M, M2](flow: Graph[FlowShape[Out, T], M],
- combine: function.Function2[Mat, M, M2]): javadsl.Flow[In, T, M2] =
+ def viaMat[T, M, M2](
+ flow: Graph[FlowShape[Out, T], M],
+ combine: function.Function2[Mat, M, M2]): javadsl.Flow[In, T, M2] =
new Flow(delegate.viaMat(flow)(combinerToScala(combine)))
/**
@@ -420,8 +426,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
* where appropriate instead of manually writing functions that pass through one of the values.
*/
- def joinMat[M, M2](flow: Graph[FlowShape[Out, In], M],
- combine: function.Function2[Mat, M, M2]): javadsl.RunnableGraph[M2] =
+ def joinMat[M, M2](
+ flow: Graph[FlowShape[Out, In], M],
+ combine: function.Function2[Mat, M, M2]): javadsl.RunnableGraph[M2] =
RunnableGraph.fromGraph(delegate.joinMat(flow)(combinerToScala(combine)))
/**
@@ -465,8 +472,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* See also [[viaMat]] when access to materialized values of the parameter is needed.
*/
- def joinMat[I2, O2, Mat2, M](bidi: Graph[BidiShape[Out, O2, I2, In], Mat2],
- combine: function.Function2[Mat, Mat2, M]): Flow[I2, O2, M] =
+ def joinMat[I2, O2, Mat2, M](
+ bidi: Graph[BidiShape[Out, O2, I2, In], Mat2],
+ combine: function.Function2[Mat, Mat2, M]): Flow[I2, O2, M] =
new Flow(delegate.joinMat(bidi)(combinerToScala(combine)))
/**
@@ -478,9 +486,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @tparam T materialized type of given Source
* @tparam U materialized type of given Sink
*/
- def runWith[T, U](source: Graph[SourceShape[In], T],
- sink: Graph[SinkShape[Out], U],
- materializer: Materializer): akka.japi.Pair[T, U] = {
+ def runWith[T, U](
+ source: Graph[SourceShape[In], T],
+ sink: Graph[SinkShape[Out], U],
+ materializer: Materializer): akka.japi.Pair[T, U] = {
val (som, sim) = delegate.runWith(source, sink)(materializer)
akka.japi.Pair(som, sim)
}
@@ -1144,9 +1153,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: FiniteDuration): javadsl.Flow[In, java.util.List[Out], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: FiniteDuration): javadsl.Flow[In, java.util.List[Out], Mat] =
new Flow(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava))
/**
@@ -1167,9 +1177,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise
* IllegalArgumentException is thrown.
*/
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: java.time.Duration): javadsl.Flow[In, java.util.List[Out], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: java.time.Duration): javadsl.Flow[In, java.util.List[Out], Mat] =
groupedWeightedWithin(maxWeight, costFn, d.asScala)
/**
@@ -1443,8 +1454,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*
*/
- def recoverWith(clazz: Class[_ <: Throwable],
- supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] =
+ def recoverWith(
+ clazz: Class[_ <: Throwable],
+ supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] =
recoverWith {
case elem if clazz.isInstance(elem) => supplier.get()
}
@@ -1474,8 +1486,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param attempts Maximum number of retries or -1 to retry indefinitely
* @param pf Receives the failure cause and returns the new Source to be materialized if any
*/
- def recoverWithRetries(attempts: Int,
- pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] =
+ def recoverWithRetries(
+ attempts: Int,
+ pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.recoverWithRetries(attempts, pf))
/**
@@ -1504,9 +1517,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param clazz the class object of the failure cause
* @param supplier supply the new Source to be materialized
*/
- def recoverWithRetries(attempts: Int,
- clazz: Class[_ <: Throwable],
- supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] =
+ def recoverWithRetries(
+ attempts: Int,
+ clazz: Class[_ <: Throwable],
+ supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] =
recoverWithRetries(attempts, {
case elem if clazz.isInstance(elem) => supplier.get()
})
@@ -1606,8 +1620,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate
*
*/
- def conflateWithSeed[S](seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] =
+ def conflateWithSeed[S](
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] =
new Flow(delegate.conflateWithSeed(seed.apply)(aggregate.apply))
/**
@@ -1663,9 +1678,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate
*/
- def batch[S](max: Long,
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] =
+ def batch[S](
+ max: Long,
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] =
new Flow(delegate.batch(max, seed.apply)(aggregate.apply))
/**
@@ -1696,10 +1712,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new batch
*/
- def batchWeighted[S](max: Long,
- costFn: function.Function[Out, java.lang.Long],
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] =
+ def batchWeighted[S](
+ max: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] =
new Flow(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply))
/**
@@ -1784,8 +1801,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param initial The initial element to be emitted, in case upstream is able to stall the entire stream.
* @see [[#expand]]
*/
- def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
- initial: Out @uncheckedVariance): javadsl.Flow[In, Out, Mat] =
+ def extrapolate(
+ extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
+ initial: Out @uncheckedVariance): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial)))
/**
@@ -1889,9 +1907,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* @param allowClosedSubstreamRecreation enables recreation of already closed substreams if elements with their
* corresponding keys arrive after completion
*/
- def groupBy[K](maxSubstreams: Int,
- f: function.Function[Out, K],
- allowClosedSubstreamRecreation: Boolean): SubFlow[In, Out, Mat] =
+ def groupBy[K](
+ maxSubstreams: Int,
+ f: function.Function[Out, K],
+ allowClosedSubstreamRecreation: Boolean): SubFlow[In, Out, Mat] =
new SubFlow(delegate.groupBy(maxSubstreams, f.apply, allowClosedSubstreamRecreation))
/**
@@ -2103,8 +2122,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#concat]]
*/
- def concatMat[M, M2](that: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
+ def concatMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
new Flow(delegate.concatMat(that)(combinerToScala(matF)))
/**
@@ -2143,8 +2163,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#prepend]]
*/
- def prependMat[M, M2](that: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
+ def prependMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
new Flow(delegate.prependMat(that)(combinerToScala(matF)))
/**
@@ -2182,8 +2203,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#orElse]]
*/
- def orElseMat[M2, M3](secondary: Graph[SourceShape[Out], M2],
- matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
+ def orElseMat[M2, M3](
+ secondary: Graph[SourceShape[Out], M2],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
new Flow(delegate.orElseMat(secondary)(combinerToScala(matF)))
/**
@@ -2214,8 +2236,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#alsoTo]]
*/
- def alsoToMat[M2, M3](that: Graph[SinkShape[Out], M2],
- matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
+ def alsoToMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
new Flow(delegate.alsoToMat(that)(combinerToScala(matF)))
/**
@@ -2242,9 +2265,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
* where appropriate instead of manually writing functions that pass through one of the values.
*/
- def divertToMat[M2, M3](that: Graph[SinkShape[Out], M2],
- when: function.Predicate[Out],
- matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
+ def divertToMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ when: function.Predicate[Out],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
new Flow(delegate.divertToMat(that, when.test)(combinerToScala(matF)))
/**
@@ -2278,8 +2302,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#wireTap]]
*/
- def wireTapMat[M2, M3](that: Graph[SinkShape[Out], M2],
- matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
+ def wireTapMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
new Flow(delegate.wireTapMat(that)(combinerToScala(matF)))
/**
@@ -2347,9 +2372,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#interleave]]
*/
- def interleaveMat[M, M2](that: Graph[SourceShape[Out], M],
- segmentSize: Int,
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
+ def interleaveMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ segmentSize: Int,
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
interleaveMat(that, segmentSize, eagerClose = false, matF)
/**
@@ -2368,10 +2394,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#interleave]]
*/
- def interleaveMat[M, M2](that: Graph[SourceShape[Out], M],
- segmentSize: Int,
- eagerClose: Boolean,
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
+ def interleaveMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ segmentSize: Int,
+ eagerClose: Boolean,
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
new Flow(delegate.interleaveMat(that, segmentSize, eagerClose)(combinerToScala(matF)))
/**
@@ -2413,8 +2440,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#merge]]
*/
- def mergeMat[M, M2](that: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
+ def mergeMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] =
mergeMat(that, matF, eagerComplete = false)
/**
@@ -2426,9 +2454,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#merge]]
*/
- def mergeMat[M, M2](that: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2],
- eagerComplete: Boolean): javadsl.Flow[In, Out, M2] =
+ def mergeMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2],
+ eagerComplete: Boolean): javadsl.Flow[In, Out, M2] =
new Flow(delegate.mergeMat(that, eagerComplete)(combinerToScala(matF)))
/**
@@ -2461,9 +2490,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#mergeSorted]].
*/
- def mergeSortedMat[Mat2, Mat3](that: Graph[SourceShape[Out], Mat2],
- comp: Comparator[Out],
- matF: function.Function2[Mat, Mat2, Mat3]): javadsl.Flow[In, Out, Mat3] =
+ def mergeSortedMat[Mat2, Mat3](
+ that: Graph[SourceShape[Out], Mat2],
+ comp: Comparator[Out],
+ matF: function.Function2[Mat, Mat2, Mat3]): javadsl.Flow[In, Out, Mat3] =
new Flow(delegate.mergeSortedMat(that)(combinerToScala(matF))(Ordering.comparatorToOrdering(comp)))
/**
@@ -2488,8 +2518,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#zip]]
*/
- def zipMat[T, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] =
+ def zipMat[T, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] =
this.viaMat(
Flow.fromGraph(
GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] {
@@ -2528,8 +2559,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#zipLatest]]
*/
- def zipLatestMat[T, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] =
+ def zipLatestMat[T, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] =
this.viaMat(
Flow.fromGraph(
GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] {
@@ -2553,8 +2585,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] =
new Flow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -2566,9 +2599,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#zipWith]]
*/
- def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M],
- combine: function.Function2[Out, Out2, Out3],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] =
+ def zipWithMat[Out2, Out3, M, M2](
+ that: Graph[SourceShape[Out2], M],
+ combine: function.Function2[Out, Out2, Out3],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] =
new Flow(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF)))
/**
@@ -2588,8 +2622,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* '''Cancels when''' downstream cancels
*/
- def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] =
+ def zipLatestWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] =
new Flow(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -2601,9 +2636,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*
* @see [[#zipLatestWith]]
*/
- def zipLatestWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M],
- combine: function.Function2[Out, Out2, Out3],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] =
+ def zipLatestWithMat[Out2, Out3, M, M2](
+ that: Graph[SourceShape[Out2], M],
+ combine: function.Function2[Out, Out2, Out3],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] =
new Flow(delegate.zipLatestWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF)))
/**
@@ -2905,10 +2941,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*
*/
- def throttle(elements: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
+ def throttle(
+ elements: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(elements, per.asScala, maximumBurst, mode))
/**
@@ -2952,11 +2989,12 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def throttle(cost: Int,
- per: FiniteDuration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: FiniteDuration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(cost, per, maximumBurst, costCalculation.apply, mode))
/**
@@ -2991,9 +3029,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- costCalculation: function.Function[Out, Integer]): javadsl.Flow[In, Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: function.Function[Out, Integer]): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(cost, per.asScala, costCalculation.apply))
/**
@@ -3035,11 +3074,12 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode))
/**
@@ -3084,10 +3124,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: FiniteDuration,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: FiniteDuration,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.throttleEven(cost, per, costCalculation.apply, mode))
/**
@@ -3102,10 +3143,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: java.time.Duration,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.Flow[In, Out, Mat] =
throttleEven(cost, per.asScala, costCalculation, mode)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala
index ff937d4dc9..ab9ea93fe0 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala
@@ -129,11 +129,12 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](
*
* @see [[akka.stream.javadsl.Flow.grouped]]
*/
- def grouped(n: Int): FlowWithContext[In,
- CtxIn,
- java.util.List[Out @uncheckedVariance],
- java.util.List[CtxOut @uncheckedVariance],
- Mat] =
+ def grouped(n: Int): FlowWithContext[
+ In,
+ CtxIn,
+ java.util.List[Out @uncheckedVariance],
+ java.util.List[CtxOut @uncheckedVariance],
+ Mat] =
viaScala(_.grouped(n).map(_.asJava).mapContext(_.asJava))
/**
@@ -144,8 +145,9 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](
def map[Out2](f: function.Function[Out, Out2]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] =
viaScala(_.map(f.apply))
- def mapAsync[Out2](parallelism: Int,
- f: function.Function[Out, CompletionStage[Out2]]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] =
+ def mapAsync[Out2](
+ parallelism: Int,
+ f: function.Function[Out, CompletionStage[Out2]]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] =
viaScala(_.mapAsync[Out2](parallelism)(o => f.apply(o).toScala))
/**
@@ -195,11 +197,12 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](
*
* @see [[akka.stream.javadsl.Flow.sliding]]
*/
- def sliding(n: Int, step: Int = 1): FlowWithContext[In,
- CtxIn,
- java.util.List[Out @uncheckedVariance],
- java.util.List[CtxOut @uncheckedVariance],
- Mat] =
+ def sliding(n: Int, step: Int = 1): FlowWithContext[
+ In,
+ CtxIn,
+ java.util.List[Out @uncheckedVariance],
+ java.util.List[CtxOut @uncheckedVariance],
+ Mat] =
viaScala(_.sliding(n, step).map(_.asJava).mapContext(_.asJava))
/**
@@ -207,9 +210,10 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](
*
* @see [[akka.stream.javadsl.Flow.log]]
*/
- def log(name: String,
- extract: function.Function[Out, Any],
- log: LoggingAdapter): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] =
+ def log(
+ name: String,
+ extract: function.Function[Out, Any],
+ log: LoggingAdapter): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] =
viaScala(_.log(name, e => extract.apply(e))(log))
/**
@@ -239,11 +243,11 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](
def asScala: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = delegate
private[this] def viaScala[In2, CtxIn2, Out2, CtxOut2, Mat2](
- f: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] => scaladsl.FlowWithContext[In2,
- CtxIn2,
- Out2,
- CtxOut2,
- Mat2])
- : FlowWithContext[In2, CtxIn2, Out2, CtxOut2, Mat2] =
+ f: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] => scaladsl.FlowWithContext[
+ In2,
+ CtxIn2,
+ Out2,
+ CtxOut2,
+ Mat2]): FlowWithContext[In2, CtxIn2, Out2, CtxOut2, Mat2] =
new FlowWithContext(f(delegate))
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala
index 461c44bb28..304376d808 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala
@@ -47,9 +47,10 @@ object Framing {
* @param maximumFrameLength The maximum length of allowed frames while decoding. If the maximum length is
* exceeded this Flow will fail the stream.
*/
- def delimiter(delimiter: ByteString,
- maximumFrameLength: Int,
- allowTruncation: FramingTruncation): Flow[ByteString, ByteString, NotUsed] = {
+ def delimiter(
+ delimiter: ByteString,
+ maximumFrameLength: Int,
+ allowTruncation: FramingTruncation): Flow[ByteString, ByteString, NotUsed] = {
val truncationAllowed = allowTruncation == FramingTruncation.ALLOW
scaladsl.Framing.delimiter(delimiter, maximumFrameLength, truncationAllowed).asJava
}
@@ -86,10 +87,11 @@ object Framing {
* the length of the size field)
* @param byteOrder The ''ByteOrder'' to be used when decoding the field
*/
- def lengthField(fieldLength: Int,
- fieldOffset: Int,
- maximumFrameLength: Int,
- byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] =
+ def lengthField(
+ fieldLength: Int,
+ fieldOffset: Int,
+ maximumFrameLength: Int,
+ byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] =
scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength, byteOrder).asJava
/**
@@ -111,18 +113,20 @@ object Framing {
* ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise.
*
*/
- def lengthField(fieldLength: Int,
- fieldOffset: Int,
- maximumFrameLength: Int,
- byteOrder: ByteOrder,
- computeFrameSize: akka.japi.function.Function2[Array[Byte], Integer, Integer])
+ def lengthField(
+ fieldLength: Int,
+ fieldOffset: Int,
+ maximumFrameLength: Int,
+ byteOrder: ByteOrder,
+ computeFrameSize: akka.japi.function.Function2[Array[Byte], Integer, Integer])
: Flow[ByteString, ByteString, NotUsed] =
scaladsl.Framing
- .lengthField(fieldLength,
- fieldOffset,
- maximumFrameLength,
- byteOrder,
- (a: Array[Byte], s: Int) => computeFrameSize.apply(a, s))
+ .lengthField(
+ fieldLength,
+ fieldOffset,
+ maximumFrameLength,
+ byteOrder,
+ (a: Array[Byte], s: Int) => computeFrameSize.apply(a, s))
.asJava
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala
index 2bc0dab305..d7c22e6d65 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala
@@ -94,8 +94,9 @@ object MergePreferred {
* @param eagerComplete set to true in order to make this operator eagerly
* finish as soon as one of its inputs completes
*/
- def create[T](secondaryPorts: Int,
- eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
+ def create[T](
+ secondaryPorts: Int,
+ eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
scaladsl.MergePreferred(secondaryPorts, eagerComplete = eagerComplete)
/**
@@ -104,9 +105,10 @@ object MergePreferred {
* @param eagerComplete set to true in order to make this operator eagerly
* finish as soon as one of its inputs completes
*/
- def create[T](clazz: Class[T],
- secondaryPorts: Int,
- eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
+ def create[T](
+ clazz: Class[T],
+ secondaryPorts: Int,
+ eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
create(secondaryPorts, eagerComplete)
}
@@ -157,9 +159,10 @@ object MergePrioritized {
* @param eagerComplete set to true in order to make this operator eagerly
* finish as soon as one of its inputs completes
*/
- def create[T](clazz: Class[T],
- priorities: Array[Int],
- eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
+ def create[T](
+ clazz: Class[T],
+ priorities: Array[Int],
+ eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
create(priorities, eagerComplete)
}
@@ -224,8 +227,9 @@ object Partition {
* @param outputCount number of output ports
* @param partitioner function deciding which output each element will be targeted
*/
- def create[T](outputCount: Int,
- partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] =
+ def create[T](
+ outputCount: Int,
+ partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] =
new scaladsl.Partition(outputCount, partitioner.apply)
/**
@@ -235,9 +239,10 @@ object Partition {
* @param partitioner function deciding which output each element will be targeted
* @param eagerCancel this operator cancels, when any (true) or all (false) of the downstreams cancel
*/
- def create[T](outputCount: Int,
- partitioner: function.Function[T, Integer],
- eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
+ def create[T](
+ outputCount: Int,
+ partitioner: function.Function[T, Integer],
+ eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel)
/**
@@ -247,9 +252,10 @@ object Partition {
* @param outputCount number of output ports
* @param partitioner function deciding which output each element will be targeted
*/
- def create[T](clazz: Class[T],
- outputCount: Int,
- partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] =
+ def create[T](
+ clazz: Class[T],
+ outputCount: Int,
+ partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] =
new scaladsl.Partition(outputCount, partitioner.apply)
/**
@@ -260,10 +266,11 @@ object Partition {
* @param partitioner function deciding which output each element will be targeted
* @param eagerCancel this operator cancels, when any (true) or all (false) of the downstreams cancel
*/
- def create[T](clazz: Class[T],
- outputCount: Int,
- partitioner: function.Function[T, Integer],
- eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
+ def create[T](
+ clazz: Class[T],
+ outputCount: Int,
+ partitioner: function.Function[T, Integer],
+ eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel)
}
@@ -300,9 +307,10 @@ object Balance {
* @param waitForAllDownstreams if `true` it will not start emitting elements to downstream outputs until all of them have requested at least one element
* @param eagerCancel if true, balance cancels upstream if any of its downstreams cancel, if false, when all have cancelled.
*/
- def create[T](outputCount: Int,
- waitForAllDownstreams: Boolean,
- eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
+ def create[T](
+ outputCount: Int,
+ waitForAllDownstreams: Boolean,
+ eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
new scaladsl.Balance(outputCount, waitForAllDownstreams, eagerCancel)
/**
@@ -329,9 +337,10 @@ object Balance {
* @param outputCount number of output ports
* @param waitForAllDownstreams if `true` it will not start emitting elements to downstream outputs until all of them have requested at least one element
*/
- def create[T](clazz: Class[T],
- outputCount: Int,
- waitForAllDownstreams: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
+ def create[T](
+ clazz: Class[T],
+ outputCount: Int,
+ waitForAllDownstreams: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
create(outputCount, waitForAllDownstreams)
/**
@@ -342,10 +351,11 @@ object Balance {
* @param waitForAllDownstreams if `true` it will not start emitting elements to downstream outputs until all of them have requested at least one element
* @param eagerCancel if true, balance cancels upstream if any of its downstreams cancel, if false, when all have cancelled.
*/
- def create[T](clazz: Class[T],
- outputCount: Int,
- waitForAllDownstreams: Boolean,
- eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
+ def create[T](
+ clazz: Class[T],
+ outputCount: Int,
+ waitForAllDownstreams: Boolean,
+ eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
new scaladsl.Balance(outputCount, waitForAllDownstreams, eagerCancel)
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala
index c7c045f20f..8ed2a41fb7 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala
@@ -132,10 +132,11 @@ object PartitionHub {
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
* is backpressured.
*/
- @ApiMayChange def ofStateful[T](clazz: Class[T],
- partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]],
- startAfterNrOfConsumers: Int,
- bufferSize: Int): Sink[T, Source[T, NotUsed]] = {
+ @ApiMayChange def ofStateful[T](
+ clazz: Class[T],
+ partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]],
+ startAfterNrOfConsumers: Int,
+ bufferSize: Int): Sink[T, Source[T, NotUsed]] = {
val p: () => (akka.stream.scaladsl.PartitionHub.ConsumerInfo, T) => Long = () => {
val f = partitioner.get()
(info, elem) => f.applyAsLong(info, elem)
@@ -146,9 +147,10 @@ object PartitionHub {
.asJava
}
- @ApiMayChange def ofStateful[T](clazz: Class[T],
- partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]],
- startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] =
+ @ApiMayChange def ofStateful[T](
+ clazz: Class[T],
+ partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]],
+ startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] =
ofStateful(clazz, partitioner, startAfterNrOfConsumers, akka.stream.scaladsl.PartitionHub.defaultBufferSize)
/**
@@ -180,18 +182,20 @@ object PartitionHub {
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
* is backpressured.
*/
- @ApiMayChange def of[T](clazz: Class[T],
- partitioner: BiFunction[Integer, T, Integer],
- startAfterNrOfConsumers: Int,
- bufferSize: Int): Sink[T, Source[T, NotUsed]] =
+ @ApiMayChange def of[T](
+ clazz: Class[T],
+ partitioner: BiFunction[Integer, T, Integer],
+ startAfterNrOfConsumers: Int,
+ bufferSize: Int): Sink[T, Source[T, NotUsed]] =
akka.stream.scaladsl.PartitionHub
.sink[T]((size, elem) => partitioner.apply(size, elem), startAfterNrOfConsumers, bufferSize)
.mapMaterializedValue(_.asJava)
.asJava
- @ApiMayChange def of[T](clazz: Class[T],
- partitioner: BiFunction[Integer, T, Integer],
- startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] =
+ @ApiMayChange def of[T](
+ clazz: Class[T],
+ partitioner: BiFunction[Integer, T, Integer],
+ startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] =
of(clazz, partitioner, startAfterNrOfConsumers, akka.stream.scaladsl.PartitionHub.defaultBufferSize)
@DoNotInherit @ApiMayChange trait ConsumerInfo {
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala
index 578cb93c1b..31bb038ddc 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala
@@ -43,10 +43,11 @@ object RestartFlow {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def withBackoff[In, Out](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
+ def withBackoff[In, Out](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
akka.stream.scaladsl.RestartFlow
.withBackoff(minBackoff, maxBackoff, randomFactor) { () =>
flowFactory.create().asScala
@@ -77,10 +78,11 @@ object RestartFlow {
* In order to skip this additional delay pass in `0`.
* @param flowFactory A factory for producing the [[Flow]] to wrap.
*/
- def withBackoff[In, Out](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
+ def withBackoff[In, Out](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
import akka.util.JavaDurationConverters._
withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, flowFactory)
}
@@ -112,11 +114,12 @@ object RestartFlow {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def withBackoff[In, Out](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int,
- flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
+ def withBackoff[In, Out](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
akka.stream.scaladsl.RestartFlow
.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () =>
flowFactory.create().asScala
@@ -149,11 +152,12 @@ object RestartFlow {
* Passing `0` will cause no restarts and a negative number will not cap the amount of restarts.
* @param flowFactory A factory for producing the [[Flow]] to wrap.
*/
- def withBackoff[In, Out](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxRestarts: Int,
- flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
+ def withBackoff[In, Out](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
import akka.util.JavaDurationConverters._
withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, flowFactory)
}
@@ -185,11 +189,12 @@ object RestartFlow {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def onFailuresWithBackoff[In, Out](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int,
- flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
+ def onFailuresWithBackoff[In, Out](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
akka.stream.scaladsl.RestartFlow
.onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () =>
flowFactory.create().asScala
@@ -222,11 +227,12 @@ object RestartFlow {
* Passing `0` will cause no restarts and a negative number will not cap the amount of restarts.
* @param flowFactory A factory for producing the [[Flow]] to wrap.
*/
- def onFailuresWithBackoff[In, Out](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxRestarts: Int,
- flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
+ def onFailuresWithBackoff[In, Out](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = {
import akka.util.JavaDurationConverters._
onFailuresWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, flowFactory)
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala
index 7ff521ee5d..07a9a385c8 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala
@@ -44,10 +44,11 @@ object RestartSink {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def withBackoff[T](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
akka.stream.scaladsl.RestartSink
.withBackoff(minBackoff, maxBackoff, randomFactor) { () =>
sinkFactory.create().asScala
@@ -79,10 +80,11 @@ object RestartSink {
* In order to skip this additional delay pass in `0`.
* @param sinkFactory A factory for producing the [[Sink]] to wrap.
*/
- def withBackoff[T](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
import akka.util.JavaDurationConverters._
withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, sinkFactory)
}
@@ -115,11 +117,12 @@ object RestartSink {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def withBackoff[T](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int,
- sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
akka.stream.scaladsl.RestartSink
.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () =>
sinkFactory.create().asScala
@@ -153,11 +156,12 @@ object RestartSink {
* Passing `0` will cause no restarts and a negative number will not cap the amount of restarts.
* @param sinkFactory A factory for producing the [[Sink]] to wrap.
*/
- def withBackoff[T](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxRestarts: Int,
- sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = {
import akka.util.JavaDurationConverters._
withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, sinkFactory)
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala
index e878d6a667..9cc5b015da 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala
@@ -40,10 +40,11 @@ object RestartSource {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def withBackoff[T](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
akka.stream.scaladsl.RestartSource
.withBackoff(minBackoff, maxBackoff, randomFactor) { () =>
sourceFactory.create().asScala
@@ -71,10 +72,11 @@ object RestartSource {
* In order to skip this additional delay pass in `0`.
* @param sourceFactory A factory for producing the [[Source]] to wrap.
*/
- def withBackoff[T](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
import akka.util.JavaDurationConverters._
withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, sourceFactory)
}
@@ -104,11 +106,12 @@ object RestartSource {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def withBackoff[T](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
akka.stream.scaladsl.RestartSource
.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () =>
sourceFactory.create().asScala
@@ -139,11 +142,12 @@ object RestartSource {
* Passing `0` will cause no restarts and a negative number will not cap the amount of restarts.
* @param sourceFactory A factory for producing the [[Source]] to wrap.
*/
- def withBackoff[T](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxRestarts: Int,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def withBackoff[T](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
import akka.util.JavaDurationConverters._
withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, sourceFactory)
}
@@ -170,10 +174,11 @@ object RestartSource {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def onFailuresWithBackoff[T](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def onFailuresWithBackoff[T](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
akka.stream.scaladsl.RestartSource
.onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor) { () =>
sourceFactory.create().asScala
@@ -201,10 +206,11 @@ object RestartSource {
* @param sourceFactory A factory for producing the [[Source]] to wrap.
*
*/
- def onFailuresWithBackoff[T](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def onFailuresWithBackoff[T](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
import akka.util.JavaDurationConverters._
onFailuresWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, sourceFactory)
}
@@ -233,11 +239,12 @@ object RestartSource {
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def onFailuresWithBackoff[T](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def onFailuresWithBackoff[T](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
akka.stream.scaladsl.RestartSource
.onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () =>
sourceFactory.create().asScala
@@ -267,11 +274,12 @@ object RestartSource {
* @param sourceFactory A factory for producing the [[Source]] to wrap.
*
*/
- def onFailuresWithBackoff[T](minBackoff: java.time.Duration,
- maxBackoff: java.time.Duration,
- randomFactor: Double,
- maxRestarts: Int,
- sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
+ def onFailuresWithBackoff[T](
+ minBackoff: java.time.Duration,
+ maxBackoff: java.time.Duration,
+ randomFactor: Double,
+ maxRestarts: Int,
+ sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = {
import akka.util.JavaDurationConverters._
onFailuresWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, sourceFactory)
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala
index 5550bd1972..9afc1f6116 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala
@@ -42,8 +42,9 @@ object Sink {
* function evaluation when the input stream ends, or completed with `Failure`
* if there is a failure is signaled in the stream.
*/
- def foldAsync[U, In](zero: U,
- f: function.Function2[U, In, CompletionStage[U]]): javadsl.Sink[In, CompletionStage[U]] =
+ def foldAsync[U, In](
+ zero: U,
+ f: function.Function2[U, In, CompletionStage[U]]): javadsl.Sink[In, CompletionStage[U]] =
new Sink(scaladsl.Sink.foldAsync[U, In](zero)(f(_, _).toScala).toCompletionStage())
/**
@@ -252,11 +253,12 @@ object Sink {
* When the stream is completed with failure - result of `onFailureMessage(throwable)`
* message will be sent to the destination actor.
*/
- def actorRefWithAck[In](ref: ActorRef,
- onInitMessage: Any,
- ackMessage: Any,
- onCompleteMessage: Any,
- onFailureMessage: function.Function[Throwable, Any]): Sink[In, NotUsed] =
+ def actorRefWithAck[In](
+ ref: ActorRef,
+ onInitMessage: Any,
+ ackMessage: Any,
+ onCompleteMessage: Any,
+ onFailureMessage: function.Function[Throwable, Any]): Sink[In, NotUsed] =
new Sink(
scaladsl.Sink.actorRefWithAck[In](ref, onInitMessage, ackMessage, onCompleteMessage, onFailureMessage.apply _))
@@ -329,12 +331,14 @@ object Sink {
@deprecated(
"Use lazyInitAsync instead. (lazyInitAsync no more needs a fallback function and the materialized value more clearly indicates if the internal sink was materialized or not.)",
"2.5.11")
- def lazyInit[T, M](sinkFactory: function.Function[T, CompletionStage[Sink[T, M]]],
- fallback: function.Creator[M]): Sink[T, CompletionStage[M]] =
+ def lazyInit[T, M](
+ sinkFactory: function.Function[T, CompletionStage[Sink[T, M]]],
+ fallback: function.Creator[M]): Sink[T, CompletionStage[M]] =
new Sink(
scaladsl.Sink
- .lazyInit[T, M](t => sinkFactory.apply(t).toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext),
- () => fallback.create())
+ .lazyInit[T, M](
+ t => sinkFactory.apply(t).toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext),
+ () => fallback.create())
.mapMaterializedValue(_.toJava))
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala
index 0e47301df6..df8e6a0553 100755
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala
@@ -352,10 +352,11 @@ object Source {
/**
* Combines several sources with fan-in strategy like `Merge` or `Concat` and returns `Source`.
*/
- def combine[T, U](first: Source[T, _ <: Any],
- second: Source[T, _ <: Any],
- rest: java.util.List[Source[T, _ <: Any]],
- strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]])
+ def combine[T, U](
+ first: Source[T, _ <: Any],
+ second: Source[T, _ <: Any],
+ rest: java.util.List[Source[T, _ <: Any]],
+ strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]])
: Source[U, NotUsed] = {
val seq = if (rest != null) Util.immutableSeq(rest).map(_.asScala) else immutable.Seq()
new Source(scaladsl.Source.combine(first.asScala, second.asScala, seq: _*)(num => strategy.apply(num)))
@@ -384,8 +385,9 @@ object Source {
/*
* Combine the elements of multiple streams into a stream of lists using a combiner function.
*/
- def zipWithN[T, O](zipper: function.Function[java.util.List[T], O],
- sources: java.util.List[Source[T, _ <: Any]]): Source[O, NotUsed] = {
+ def zipWithN[T, O](
+ zipper: function.Function[java.util.List[T], O],
+ sources: java.util.List[Source[T, _ <: Any]]): Source[O, NotUsed] = {
val seq = if (sources != null) Util.immutableSeq(sources).map(_.asScala) else immutable.Seq()
new Source(scaladsl.Source.zipWithN[T, O](seq => zipper.apply(seq.asJava))(seq))
}
@@ -451,9 +453,10 @@ object Source {
* is received. Stream calls close and completes when `read` returns None.
* @param close - function that closes resource
*/
- def unfoldResource[T, S](create: function.Creator[S],
- read: function.Function[S, Optional[T]],
- close: function.Procedure[S]): javadsl.Source[T, NotUsed] =
+ def unfoldResource[T, S](
+ create: function.Creator[S],
+ read: function.Function[S, Optional[T]],
+ close: function.Procedure[S]): javadsl.Source[T, NotUsed] =
new Source(scaladsl.Source.unfoldResource[T, S](create.create _, (s: S) => read.apply(s).asScala, close.apply))
/**
@@ -476,18 +479,15 @@ object Source {
* is received. Stream calls close and completes when `CompletionStage` from read function returns None.
* @param close - function that closes resource
*/
- def unfoldResourceAsync[T, S](create: function.Creator[CompletionStage[S]],
- read: function.Function[S, CompletionStage[Optional[T]]],
- close: function.Function[S, CompletionStage[Done]]): javadsl.Source[T, NotUsed] =
+ def unfoldResourceAsync[T, S](
+ create: function.Creator[CompletionStage[S]],
+ read: function.Function[S, CompletionStage[Optional[T]]],
+ close: function.Function[S, CompletionStage[Done]]): javadsl.Source[T, NotUsed] =
new Source(
- scaladsl.Source.unfoldResourceAsync[T, S](() => create.create().toScala,
- (s: S) =>
- read
- .apply(s)
- .toScala
- .map(_.asScala)(
- akka.dispatch.ExecutionContexts.sameThreadExecutionContext),
- (s: S) => close.apply(s).toScala))
+ scaladsl.Source.unfoldResourceAsync[T, S](
+ () => create.create().toScala,
+ (s: S) => read.apply(s).toScala.map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext),
+ (s: S) => close.apply(s).toScala))
/**
* Upcast a stream of elements to a stream of supertypes of that element. Useful in combination with
@@ -588,8 +588,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
* where appropriate instead of manually writing functions that pass through one of the values.
*/
- def viaMat[T, M, M2](flow: Graph[FlowShape[Out, T], M],
- combine: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
+ def viaMat[T, M, M2](
+ flow: Graph[FlowShape[Out, T], M],
+ combine: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
new Source(delegate.viaMat(flow)(combinerToScala(combine)))
/**
@@ -660,9 +661,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* function evaluation when the input stream ends, or completed with `Failure`
* if there is a failure is signaled in the stream.
*/
- def runFoldAsync[U](zero: U,
- f: function.Function2[U, Out, CompletionStage[U]],
- materializer: Materializer): CompletionStage[U] = runWith(Sink.foldAsync(zero, f), materializer)
+ def runFoldAsync[U](
+ zero: U,
+ f: function.Function2[U, Out, CompletionStage[U]],
+ materializer: Materializer): CompletionStage[U] = runWith(Sink.foldAsync(zero, f), materializer)
/**
* Shortcut for running this `Source` with a reduce function.
@@ -716,8 +718,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#concat]].
*/
- def concatMat[M, M2](that: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
+ def concatMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
new Source(delegate.concatMat(that)(combinerToScala(matF)))
/**
@@ -756,8 +759,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#prepend]].
*/
- def prependMat[M, M2](that: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
+ def prependMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
new Source(delegate.prependMat(that)(combinerToScala(matF)))
/**
@@ -795,8 +799,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#orElse]]
*/
- def orElseMat[M, M2](secondary: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
+ def orElseMat[M, M2](
+ secondary: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
new Source(delegate.orElseMat(secondary)(combinerToScala(matF)))
/**
@@ -827,8 +832,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#alsoTo]]
*/
- def alsoToMat[M2, M3](that: Graph[SinkShape[Out], M2],
- matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
+ def alsoToMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
new Source(delegate.alsoToMat(that)(combinerToScala(matF)))
/**
@@ -855,9 +861,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
* where appropriate instead of manually writing functions that pass through one of the values.
*/
- def divertToMat[M2, M3](that: Graph[SinkShape[Out], M2],
- when: function.Predicate[Out],
- matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
+ def divertToMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ when: function.Predicate[Out],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
new Source(delegate.divertToMat(that, when.test)(combinerToScala(matF)))
/**
@@ -892,8 +899,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#wireTap]]
*/
- def wireTapMat[M2, M3](that: Graph[SinkShape[Out], M2],
- matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
+ def wireTapMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
new Source(delegate.wireTapMat(that)(combinerToScala(matF)))
/**
@@ -960,9 +968,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#interleave]].
*/
- def interleaveMat[M, M2](that: Graph[SourceShape[Out], M],
- segmentSize: Int,
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
+ def interleaveMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ segmentSize: Int,
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
new Source(delegate.interleaveMat(that, segmentSize)(combinerToScala(matF)))
/**
@@ -981,10 +990,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#interleave]]
*/
- def interleaveMat[M, M2](that: Graph[SourceShape[Out], M],
- segmentSize: Int,
- eagerClose: Boolean,
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
+ def interleaveMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ segmentSize: Int,
+ eagerClose: Boolean,
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] =
new Source(delegate.interleaveMat(that, segmentSize, eagerClose)(combinerToScala(matF)))
/**
@@ -1038,9 +1048,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#merge]]
*/
- def mergeMat[M, M2](that: Graph[SourceShape[Out], M],
- matF: function.Function2[Mat, M, M2],
- eagerComplete: Boolean): javadsl.Source[Out, M2] =
+ def mergeMat[M, M2](
+ that: Graph[SourceShape[Out], M],
+ matF: function.Function2[Mat, M, M2],
+ eagerComplete: Boolean): javadsl.Source[Out, M2] =
new Source(delegate.mergeMat(that, eagerComplete)(combinerToScala(matF)))
/**
@@ -1073,9 +1084,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#mergeSorted]].
*/
- def mergeSortedMat[Mat2, Mat3](that: Graph[SourceShape[Out], Mat2],
- comp: util.Comparator[Out],
- matF: function.Function2[Mat, Mat2, Mat3]): javadsl.Source[Out, Mat3] =
+ def mergeSortedMat[Mat2, Mat3](
+ that: Graph[SourceShape[Out], Mat2],
+ comp: util.Comparator[Out],
+ matF: function.Function2[Mat, Mat2, Mat3]): javadsl.Source[Out, Mat3] =
new Source(delegate.mergeSortedMat(that)(combinerToScala(matF))(Ordering.comparatorToOrdering(comp)))
/**
@@ -1100,8 +1112,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#zip]].
*/
- def zipMat[T, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] =
+ def zipMat[T, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] =
this.viaMat(Flow.create[Out].zipMat(that, Keep.right[NotUsed, M]), matF)
/**
@@ -1131,8 +1144,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#zipLatest]].
*/
- def zipLatestMat[T, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] =
+ def zipLatestMat[T, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] =
this.viaMat(Flow.create[Out].zipLatestMat(that, Keep.right[NotUsed, M]), matF)
/**
@@ -1147,8 +1161,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] =
new Source(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1160,9 +1175,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#zipWith]].
*/
- def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M],
- combine: function.Function2[Out, Out2, Out3],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] =
+ def zipWithMat[Out2, Out3, M, M2](
+ that: Graph[SourceShape[Out2], M],
+ combine: function.Function2[Out, Out2, Out3],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] =
new Source(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF)))
/**
@@ -1182,8 +1198,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* '''Cancels when''' downstream cancels
*/
- def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] =
+ def zipLatestWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] =
new Source(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1196,9 +1213,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* @see [[#zipLatestWith]].
*/
- def zipLatestWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M],
- combine: function.Function2[Out, Out2, Out3],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] =
+ def zipLatestWithMat[Out2, Out3, M, M2](
+ that: Graph[SourceShape[Out2], M],
+ combine: function.Function2[Out, Out2, Out3],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] =
new Source(delegate.zipLatestWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF)))
/**
@@ -1373,8 +1391,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*
*/
- def recoverWith(clazz: Class[_ <: Throwable],
- supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] =
+ def recoverWith(
+ clazz: Class[_ <: Throwable],
+ supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] =
recoverWith {
case elem if clazz.isInstance(elem) => supplier.get()
}
@@ -1402,8 +1421,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*
*/
- def recoverWithRetries(attempts: Int,
- pf: PartialFunction[Throwable, _ <: Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] =
+ def recoverWithRetries(
+ attempts: Int,
+ pf: PartialFunction[Throwable, _ <: Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] =
new Source(delegate.recoverWithRetries(attempts, pf))
/**
@@ -1432,9 +1452,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* @param clazz the class object of the failure cause
* @param supplier supply the new Source to be materialized
*/
- def recoverWithRetries(attempts: Int,
- clazz: Class[_ <: Throwable],
- supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] =
+ def recoverWithRetries(
+ attempts: Int,
+ clazz: Class[_ <: Throwable],
+ supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] =
recoverWithRetries(attempts, {
case elem if clazz.isInstance(elem) => supplier.get()
})
@@ -2044,9 +2065,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: FiniteDuration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: FiniteDuration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
new Source(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava))
/**
@@ -2067,9 +2089,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise
* IllegalArgumentException is thrown.
*/
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: java.time.Duration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: java.time.Duration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
groupedWeightedWithin(maxWeight, costFn, d.asScala)
/**
@@ -2326,8 +2349,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* @param seed Provides the first state for a conflated value using the first unconsumed element as a start
* @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate
*/
- def conflateWithSeed[S](seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
+ def conflateWithSeed[S](
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
new Source(delegate.conflateWithSeed(seed.apply)(aggregate.apply))
/**
@@ -2381,9 +2405,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate
*/
- def batch[S](max: Long,
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
+ def batch[S](
+ max: Long,
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
new Source(delegate.batch(max, seed.apply)(aggregate.apply))
/**
@@ -2414,10 +2439,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new batch
*/
- def batchWeighted[S](max: Long,
- costFn: function.Function[Out, java.lang.Long],
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
+ def batchWeighted[S](
+ max: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
new Source(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply))
/**
@@ -2502,8 +2528,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* @param initial the initial element to be emitted, in case upstream is able to stall the entire stream.
* @see [[#expand]]
*/
- def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
- initial: Out @uncheckedVariance): Source[Out, Mat] =
+ def extrapolate(
+ extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
+ initial: Out @uncheckedVariance): Source[Out, Mat] =
new Source(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial)))
/**
@@ -2551,9 +2578,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*
* '''Cancels when''' downstream cancels or substream cancels
*/
- def prefixAndTail(n: Int)
- : javadsl.Source[Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]],
- Mat] =
+ def prefixAndTail(n: Int): javadsl.Source[
+ Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]],
+ Mat] =
new Source(delegate.prefixAndTail(n).map { case (taken, tail) => Pair(taken.asJava, tail.asJava) })
/**
@@ -2609,9 +2636,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* @param allowClosedSubstreamRecreation enables recreation of already closed substreams if elements with their
* corresponding keys arrive after completion
*/
- def groupBy[K](maxSubstreams: Int,
- f: function.Function[Out, K],
- allowClosedSubstreamRecreation: Boolean): SubSource[Out, Mat] =
+ def groupBy[K](
+ maxSubstreams: Int,
+ f: function.Function[Out, K],
+ allowClosedSubstreamRecreation: Boolean): SubSource[Out, Mat] =
new SubSource(delegate.groupBy(maxSubstreams, f.apply, allowClosedSubstreamRecreation))
/**
@@ -3095,10 +3123,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*
*/
- def throttle(elements: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- mode: ThrottleMode): javadsl.Source[Out, Mat] =
+ def throttle(
+ elements: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ mode: ThrottleMode): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(elements, per.asScala, maximumBurst, mode))
/**
@@ -3133,9 +3162,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- costCalculation: function.Function[Out, Integer]): javadsl.Source[Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: function.Function[Out, Integer]): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(cost, per.asScala, costCalculation.apply _))
/**
@@ -3179,11 +3209,12 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def throttle(cost: Int,
- per: FiniteDuration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.Source[Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: FiniteDuration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(cost, per, maximumBurst, costCalculation.apply _, mode))
/**
@@ -3225,11 +3256,12 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.Source[Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.Source[Out, Mat] =
new Source(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply _, mode))
/**
@@ -3274,10 +3306,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: FiniteDuration,
- costCalculation: (Out) => Int,
- mode: ThrottleMode): javadsl.Source[Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: FiniteDuration,
+ costCalculation: (Out) => Int,
+ mode: ThrottleMode): javadsl.Source[Out, Mat] =
new Source(delegate.throttleEven(cost, per, costCalculation.apply _, mode))
/**
@@ -3292,10 +3325,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: java.time.Duration,
- costCalculation: (Out) => Int,
- mode: ThrottleMode): javadsl.Source[Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: (Out) => Int,
+ mode: ThrottleMode): javadsl.Source[Out, Mat] =
throttleEven(cost, per.asScala, costCalculation, mode)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala
index ae7e8f3a3d..bea0ad6981 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala
@@ -124,8 +124,9 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon
def map[Out2](f: function.Function[Out, Out2]): SourceWithContext[Out2, Ctx, Mat] =
viaScala(_.map(f.apply))
- def mapAsync[Out2](parallelism: Int,
- f: function.Function[Out, CompletionStage[Out2]]): SourceWithContext[Out2, Ctx, Mat] =
+ def mapAsync[Out2](
+ parallelism: Int,
+ f: function.Function[Out, CompletionStage[Out2]]): SourceWithContext[Out2, Ctx, Mat] =
viaScala(_.mapAsync[Out2](parallelism)(o => f.apply(o).toScala))
/**
@@ -220,16 +221,18 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon
* Connect this [[akka.stream.javadsl.SourceWithContext]] to a [[akka.stream.javadsl.Sink]],
* concatenating the processing steps of both.
*/
- def toMat[Mat2, Mat3](sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], Mat2],
- combine: function.Function2[Mat, Mat2, Mat3]): javadsl.RunnableGraph[Mat3] =
+ def toMat[Mat2, Mat3](
+ sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], Mat2],
+ combine: function.Function2[Mat, Mat2, Mat3]): javadsl.RunnableGraph[Mat3] =
RunnableGraph.fromGraph(asScala.asSource.map { case (o, e) => Pair(o, e) }.toMat(sink)(combinerToScala(combine)))
/**
* Connect this [[akka.stream.javadsl.SourceWithContext]] to a [[akka.stream.javadsl.Sink]] and run it.
* The returned value is the materialized value of the `Sink`.
*/
- def runWith[M](sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], M],
- materializer: Materializer): M =
+ def runWith[M](
+ sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], M],
+ materializer: Materializer): M =
toMat(sink, Keep.right[Mat, M]).run(materializer)
def asScala: scaladsl.SourceWithContext[Out, Ctx, Mat] = delegate
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala
index 6ee203655f..79cba02e77 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala
@@ -53,8 +53,9 @@ object StreamConverters {
* @param f A Creator which creates an OutputStream to write to
* @param autoFlush If true the OutputStream will be flushed whenever a byte array is written
*/
- def fromOutputStream(f: function.Creator[OutputStream],
- autoFlush: Boolean): javadsl.Sink[ByteString, CompletionStage[IOResult]] =
+ def fromOutputStream(
+ f: function.Creator[OutputStream],
+ autoFlush: Boolean): javadsl.Sink[ByteString, CompletionStage[IOResult]] =
new Sink(scaladsl.StreamConverters.fromOutputStream(() => f.create(), autoFlush).toCompletionStage())
/**
@@ -126,8 +127,9 @@ object StreamConverters {
*
* The created [[InputStream]] will be closed when the [[Source]] is cancelled.
*/
- def fromInputStream(in: function.Creator[InputStream],
- chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] =
+ def fromInputStream(
+ in: function.Creator[InputStream],
+ chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] =
new Source(scaladsl.StreamConverters.fromInputStream(() => in.create(), chunkSize).toCompletionStage())
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala
index cc3ff2f0f6..bacc95c24b 100755
--- a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala
@@ -699,9 +699,10 @@ class SubFlow[In, Out, Mat](
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: FiniteDuration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: FiniteDuration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] =
new SubFlow(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava))
/**
@@ -722,9 +723,10 @@ class SubFlow[In, Out, Mat](
* `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise
* IllegalArgumentException is thrown.
*/
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: java.time.Duration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: java.time.Duration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] =
groupedWeightedWithin(maxWeight, costFn, d.asScala)
/**
@@ -955,8 +957,9 @@ class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def recoverWithRetries(attempts: Int,
- pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubFlow[In, Out, Mat] =
+ def recoverWithRetries(
+ attempts: Int,
+ pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubFlow[In, Out, Mat] =
new SubFlow(delegate.recoverWithRetries(attempts, pf))
/**
@@ -1070,8 +1073,9 @@ class SubFlow[In, Out, Mat](
* @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate
*
*/
- def conflateWithSeed[S](seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] =
+ def conflateWithSeed[S](
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] =
new SubFlow(delegate.conflateWithSeed(seed.apply)(aggregate.apply))
/**
@@ -1127,9 +1131,10 @@ class SubFlow[In, Out, Mat](
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate
*/
- def batch[S](max: Long,
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] =
+ def batch[S](
+ max: Long,
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] =
new SubFlow(delegate.batch(max, seed.apply)(aggregate.apply))
/**
@@ -1160,10 +1165,11 @@ class SubFlow[In, Out, Mat](
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new batch
*/
- def batchWeighted[S](max: Long,
- costFn: function.Function[Out, java.lang.Long],
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] =
+ def batchWeighted[S](
+ max: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] =
new SubFlow(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply))
/**
@@ -1249,8 +1255,9 @@ class SubFlow[In, Out, Mat](
* @param initial the initial element to be emitted, in case upstream is able to stall the entire stream.
* @see [[#expand]]
*/
- def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
- initial: Out @uncheckedVariance): SubFlow[In, Out, Mat] =
+ def extrapolate(
+ extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
+ initial: Out @uncheckedVariance): SubFlow[In, Out, Mat] =
new SubFlow(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial)))
/**
@@ -1298,10 +1305,10 @@ class SubFlow[In, Out, Mat](
*
* '''Cancels when''' downstream cancels or substream cancels
*/
- def prefixAndTail(n: Int)
- : SubFlow[In,
- akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]],
- Mat] =
+ def prefixAndTail(n: Int): SubFlow[
+ In,
+ akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]],
+ Mat] =
new SubFlow(delegate.prefixAndTail(n).map { case (taken, tail) => akka.japi.Pair(taken.asJava, tail.asJava) })
/**
@@ -1555,8 +1562,9 @@ class SubFlow[In, Out, Mat](
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] =
new SubFlow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1572,8 +1580,9 @@ class SubFlow[In, Out, Mat](
*
* '''Cancels when''' downstream cancels
*/
- def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] =
+ def zipLatestWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] =
new SubFlow(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1836,10 +1845,11 @@ class SubFlow[In, Out, Mat](
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def throttle(elements: Int,
- per: FiniteDuration,
- maximumBurst: Int,
- mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
+ def throttle(
+ elements: Int,
+ per: FiniteDuration,
+ maximumBurst: Int,
+ mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(elements, per, maximumBurst, mode))
/**
@@ -1878,10 +1888,11 @@ class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def throttle(elements: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
+ def throttle(
+ elements: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(elements, per.asScala, maximumBurst, mode))
/**
@@ -1916,9 +1927,10 @@ class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- costCalculation: function.Function[Out, Integer]): javadsl.SubFlow[In, Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: function.Function[Out, Integer]): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(cost, per.asScala, costCalculation.apply))
/**
@@ -1962,11 +1974,12 @@ class SubFlow[In, Out, Mat](
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def throttle(cost: Int,
- per: FiniteDuration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: FiniteDuration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(cost, per, maximumBurst, costCalculation.apply, mode))
/**
@@ -2008,11 +2021,12 @@ class SubFlow[In, Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode))
/**
@@ -2057,10 +2071,11 @@ class SubFlow[In, Out, Mat](
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: FiniteDuration,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: FiniteDuration,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
new SubFlow(delegate.throttleEven(cost, per, costCalculation.apply, mode))
/**
@@ -2075,10 +2090,11 @@ class SubFlow[In, Out, Mat](
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: java.time.Duration,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] =
throttleEven(cost, per.asScala, costCalculation, mode)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala
index 02bda5e334..98aa6b79bb 100755
--- a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala
@@ -686,9 +686,10 @@ class SubSource[Out, Mat](
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: FiniteDuration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: FiniteDuration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] =
new SubSource(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava))
/**
@@ -709,9 +710,10 @@ class SubSource[Out, Mat](
* `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise
* IllegalArgumentException is thrown.
*/
- def groupedWeightedWithin(maxWeight: Long,
- costFn: function.Function[Out, java.lang.Long],
- d: java.time.Duration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] =
+ def groupedWeightedWithin(
+ maxWeight: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ d: java.time.Duration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] =
groupedWeightedWithin(maxWeight, costFn, d.asScala)
/**
@@ -935,8 +937,9 @@ class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def recoverWithRetries(attempts: Int,
- pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubSource[Out, Mat] =
+ def recoverWithRetries(
+ attempts: Int,
+ pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubSource[Out, Mat] =
new SubSource(delegate.recoverWithRetries(attempts, pf))
/**
@@ -1050,8 +1053,9 @@ class SubSource[Out, Mat](
* @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate
*
*/
- def conflateWithSeed[S](seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] =
+ def conflateWithSeed[S](
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] =
new SubSource(delegate.conflateWithSeed(seed.apply)(aggregate.apply))
/**
@@ -1107,9 +1111,10 @@ class SubSource[Out, Mat](
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate
*/
- def batch[S](max: Long,
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] =
+ def batch[S](
+ max: Long,
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] =
new SubSource(delegate.batch(max, seed.apply)(aggregate.apply))
/**
@@ -1140,10 +1145,11 @@ class SubSource[Out, Mat](
* @param seed Provides the first state for a batched value using the first unconsumed element as a start
* @param aggregate Takes the currently batched value and the current pending element to produce a new batch
*/
- def batchWeighted[S](max: Long,
- costFn: function.Function[Out, java.lang.Long],
- seed: function.Function[Out, S],
- aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] =
+ def batchWeighted[S](
+ max: Long,
+ costFn: function.Function[Out, java.lang.Long],
+ seed: function.Function[Out, S],
+ aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] =
new SubSource(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply))
/**
@@ -1228,8 +1234,9 @@ class SubSource[Out, Mat](
* @param initial the initial element to be emitted, in case upstream is able to stall the entire stream.
* @see [[#expand]]
*/
- def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
- initial: Out @uncheckedVariance): SubSource[Out, Mat] =
+ def extrapolate(
+ extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]],
+ initial: Out @uncheckedVariance): SubSource[Out, Mat] =
new SubSource(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial)))
/**
@@ -1534,8 +1541,9 @@ class SubSource[Out, Mat](
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] =
new SubSource(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1551,8 +1559,9 @@ class SubSource[Out, Mat](
*
* '''Cancels when''' downstream cancels
*/
- def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] =
+ def zipLatestWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] =
new SubSource(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1854,10 +1863,11 @@ class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def throttle(elements: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
+ def throttle(
+ elements: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(elements, per.asScala, maximumBurst, mode))
/**
@@ -1892,9 +1902,10 @@ class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- costCalculation: function.Function[Out, Integer]): javadsl.SubSource[Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: function.Function[Out, Integer]): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(cost, per.asScala, costCalculation.apply _))
/**
@@ -1938,11 +1949,12 @@ class SubSource[Out, Mat](
*/
@Deprecated
@deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12")
- def throttle(cost: Int,
- per: FiniteDuration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: FiniteDuration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(cost, per, maximumBurst, costCalculation.apply _, mode))
/**
@@ -1984,11 +1996,12 @@ class SubSource[Out, Mat](
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: java.time.Duration,
- maximumBurst: Int,
- costCalculation: function.Function[Out, Integer],
- mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
+ def throttle(
+ cost: Int,
+ per: java.time.Duration,
+ maximumBurst: Int,
+ costCalculation: function.Function[Out, Integer],
+ mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply _, mode))
/**
@@ -2033,10 +2046,11 @@ class SubSource[Out, Mat](
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: FiniteDuration,
- costCalculation: (Out) => Int,
- mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: FiniteDuration,
+ costCalculation: (Out) => Int,
+ mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
new SubSource(delegate.throttleEven(cost, per, costCalculation.apply _, mode))
/**
@@ -2051,10 +2065,11 @@ class SubSource[Out, Mat](
*/
@Deprecated
@deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12")
- def throttleEven(cost: Int,
- per: java.time.Duration,
- costCalculation: (Out) => Int,
- mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
+ def throttleEven(
+ cost: Int,
+ per: java.time.Duration,
+ costCalculation: (Out) => Int,
+ mode: ThrottleMode): javadsl.SubSource[Out, Mat] =
throttleEven(cost, per.asScala, costCalculation, mode)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala b/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala
index 4a469e884c..b92dc116cc 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala
@@ -66,10 +66,11 @@ object TLS {
*
* This method uses the default closing behavior or [[IgnoreComplete]].
*/
- def create(sslContext: SSLContext,
- sslConfig: Optional[AkkaSSLConfig],
- firstSession: NegotiateNewSession,
- role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def create(
+ sslContext: SSLContext,
+ sslConfig: Optional[AkkaSSLConfig],
+ firstSession: NegotiateNewSession,
+ role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new javadsl.BidiFlow(scaladsl.TLS.apply(sslContext, OptionConverters.toScala(sslConfig), firstSession, role))
/**
@@ -83,9 +84,10 @@ object TLS {
*
* This method uses the default closing behavior or [[IgnoreComplete]].
*/
- def create(sslContext: SSLContext,
- firstSession: NegotiateNewSession,
- role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def create(
+ sslContext: SSLContext,
+ firstSession: NegotiateNewSession,
+ role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new javadsl.BidiFlow(scaladsl.TLS.apply(sslContext, None, firstSession, role))
/**
@@ -104,19 +106,21 @@ object TLS {
* The SSLEngine may use this information e.g. when an endpoint identification algorithm was
* configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]].
*/
- def create(sslContext: SSLContext,
- sslConfig: Optional[AkkaSSLConfig],
- firstSession: NegotiateNewSession,
- role: TLSRole,
- hostInfo: Optional[japi.Pair[String, java.lang.Integer]],
- closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def create(
+ sslContext: SSLContext,
+ sslConfig: Optional[AkkaSSLConfig],
+ firstSession: NegotiateNewSession,
+ role: TLSRole,
+ hostInfo: Optional[japi.Pair[String, java.lang.Integer]],
+ closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new javadsl.BidiFlow(
- scaladsl.TLS.apply(sslContext,
- OptionConverters.toScala(sslConfig),
- firstSession,
- role,
- closing,
- OptionConverters.toScala(hostInfo).map(e => (e.first, e.second))))
+ scaladsl.TLS.apply(
+ sslContext,
+ OptionConverters.toScala(sslConfig),
+ firstSession,
+ role,
+ closing,
+ OptionConverters.toScala(hostInfo).map(e => (e.first, e.second))))
/**
* Create a StreamTls [[akka.stream.javadsl.BidiFlow]] in client mode. The
@@ -134,18 +138,20 @@ object TLS {
* The SSLEngine may use this information e.g. when an endpoint identification algorithm was
* configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]].
*/
- def create(sslContext: SSLContext,
- firstSession: NegotiateNewSession,
- role: TLSRole,
- hostInfo: Optional[japi.Pair[String, java.lang.Integer]],
- closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def create(
+ sslContext: SSLContext,
+ firstSession: NegotiateNewSession,
+ role: TLSRole,
+ hostInfo: Optional[japi.Pair[String, java.lang.Integer]],
+ closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new javadsl.BidiFlow(
- scaladsl.TLS.apply(sslContext,
- None,
- firstSession,
- role,
- closing,
- OptionConverters.toScala(hostInfo).map(e => (e.first, e.second))))
+ scaladsl.TLS.apply(
+ sslContext,
+ None,
+ firstSession,
+ role,
+ closing,
+ OptionConverters.toScala(hostInfo).map(e => (e.first, e.second))))
/**
* Create a StreamTls [[akka.stream.javadsl.BidiFlow]]. This is a low-level interface.
@@ -158,9 +164,10 @@ object TLS {
*
* For a description of the `closing` parameter please refer to [[TLSClosing]].
*/
- def create(sslEngineCreator: Supplier[SSLEngine],
- sessionVerifier: Consumer[SSLSession],
- closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def create(
+ sslEngineCreator: Supplier[SSLEngine],
+ sessionVerifier: Consumer[SSLSession],
+ closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new javadsl.BidiFlow(
scaladsl.TLS.apply(() => sslEngineCreator.get(), session => Try(sessionVerifier.accept(session)), closing))
@@ -172,8 +179,9 @@ object TLS {
*
* For a description of the `closing` parameter please refer to [[TLSClosing]].
*/
- def create(sslEngineCreator: Supplier[SSLEngine],
- closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def create(
+ sslEngineCreator: Supplier[SSLEngine],
+ closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new javadsl.BidiFlow(scaladsl.TLS.apply(() => sslEngineCreator.get(), closing))
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala
index 2c85bf9d13..df3dcc5914 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala
@@ -139,12 +139,13 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* independently whether the client is still attempting to write. This setting is recommended
* for servers, and therefore it is the default setting.
*/
- def bind(interface: String,
- port: Int,
- backlog: Int,
- options: JIterable[SocketOption],
- halfClose: Boolean,
- idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] =
+ def bind(
+ interface: String,
+ port: Int,
+ backlog: Int,
+ options: JIterable[SocketOption],
+ halfClose: Boolean,
+ idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] =
Source.fromGraph(
delegate
.bind(interface, port, backlog, immutableSeq(options), halfClose, idleTimeout)
@@ -186,20 +187,22 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* If set to false, the connection will immediately closed once the client closes its write side,
* independently whether the server is still attempting to write.
*/
- def outgoingConnection(remoteAddress: InetSocketAddress,
- localAddress: Optional[InetSocketAddress],
- options: JIterable[SocketOption],
- halfClose: Boolean,
- connectTimeout: Duration,
- idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
+ def outgoingConnection(
+ remoteAddress: InetSocketAddress,
+ localAddress: Optional[InetSocketAddress],
+ options: JIterable[SocketOption],
+ halfClose: Boolean,
+ connectTimeout: Duration,
+ idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
Flow.fromGraph(
delegate
- .outgoingConnection(remoteAddress,
- localAddress.asScala,
- immutableSeq(options),
- halfClose,
- connectTimeout,
- idleTimeout)
+ .outgoingConnection(
+ remoteAddress,
+ localAddress.asScala,
+ immutableSeq(options),
+ halfClose,
+ connectTimeout,
+ idleTimeout)
.mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava))
/**
@@ -243,22 +246,24 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* Marked API-may-change to leave room for an improvement around the very long parameter list.
*/
@ApiMayChange
- def outgoingTlsConnection(remoteAddress: InetSocketAddress,
- sslContext: SSLContext,
- negotiateNewSession: NegotiateNewSession,
- localAddress: Optional[InetSocketAddress],
- options: JIterable[SocketOption],
- connectTimeout: Duration,
- idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
+ def outgoingTlsConnection(
+ remoteAddress: InetSocketAddress,
+ sslContext: SSLContext,
+ negotiateNewSession: NegotiateNewSession,
+ localAddress: Optional[InetSocketAddress],
+ options: JIterable[SocketOption],
+ connectTimeout: Duration,
+ idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
Flow.fromGraph(
delegate
- .outgoingTlsConnection(remoteAddress,
- sslContext,
- negotiateNewSession,
- localAddress.asScala,
- immutableSeq(options),
- connectTimeout,
- idleTimeout)
+ .outgoingTlsConnection(
+ remoteAddress,
+ sslContext,
+ negotiateNewSession,
+ localAddress.asScala,
+ immutableSeq(options),
+ connectTimeout,
+ idleTimeout)
.mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava))
/**
@@ -269,14 +274,15 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* Marked API-may-change to leave room for an improvement around the very long parameter list.
*/
@ApiMayChange
- def bindTls(interface: String,
- port: Int,
- sslContext: SSLContext,
- negotiateNewSession: NegotiateNewSession,
- backlog: Int,
- options: JIterable[SocketOption],
- halfClose: Boolean,
- idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] =
+ def bindTls(
+ interface: String,
+ port: Int,
+ sslContext: SSLContext,
+ negotiateNewSession: NegotiateNewSession,
+ backlog: Int,
+ options: JIterable[SocketOption],
+ halfClose: Boolean,
+ idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] =
Source.fromGraph(
delegate
.bindTls(interface, port, sslContext, negotiateNewSession, backlog, immutableSeq(options), idleTimeout)
@@ -289,10 +295,11 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* @see [[Tcp.bind()]]
*/
- def bindTls(interface: String,
- port: Int,
- sslContext: SSLContext,
- negotiateNewSession: NegotiateNewSession): Source[IncomingConnection, CompletionStage[ServerBinding]] =
+ def bindTls(
+ interface: String,
+ port: Int,
+ sslContext: SSLContext,
+ negotiateNewSession: NegotiateNewSession): Source[IncomingConnection, CompletionStage[ServerBinding]] =
Source.fromGraph(
delegate
.bindTls(interface, port, sslContext, negotiateNewSession)
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala
index b1f854b844..fe1ebdf506 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala
@@ -10,8 +10,9 @@ import akka.stream.impl.{ LinearTraversalBuilder, Timers, TraversalBuilder }
import scala.concurrent.duration.FiniteDuration
-final class BidiFlow[-I1, +O1, -I2, +O2, +Mat](override val traversalBuilder: TraversalBuilder,
- override val shape: BidiShape[I1, O1, I2, O2])
+final class BidiFlow[-I1, +O1, -I2, +O2, +Mat](
+ override val traversalBuilder: TraversalBuilder,
+ override val shape: BidiShape[I1, O1, I2, O2])
extends Graph[BidiShape[I1, O1, I2, O2], Mat] {
def asJava[JI1 <: I1, JO1 >: O1, JI2 <: I2, JO2 >: O2, JMat >: Mat]: javadsl.BidiFlow[JI1, JO1, JI2, JO2, JMat] =
@@ -72,8 +73,9 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat](override val traversalBuilder: Tr
.wire(newBidi1Shape.out1, newBidi2Shape.in1)
.wire(newBidi2Shape.out2, newBidi1Shape.in2)
- new BidiFlow(newTraversalBuilder,
- BidiShape(newBidi1Shape.in1, newBidi2Shape.out1, newBidi2Shape.in2, newBidi1Shape.out2))
+ new BidiFlow(
+ newTraversalBuilder,
+ BidiShape(newBidi1Shape.in1, newBidi2Shape.out1, newBidi2Shape.in2, newBidi1Shape.out2))
}
/**
@@ -237,11 +239,12 @@ object BidiFlow {
val newFlow1Shape = flow1.shape.deepCopy()
val newFlow2Shape = flow2.shape.deepCopy()
- new BidiFlow(TraversalBuilder
- .empty()
- .add(flow1.traversalBuilder, newFlow1Shape, Keep.right)
- .add(flow2.traversalBuilder, newFlow2Shape, combine),
- BidiShape(newFlow1Shape.in, newFlow1Shape.out, newFlow2Shape.in, newFlow2Shape.out))
+ new BidiFlow(
+ TraversalBuilder
+ .empty()
+ .add(flow1.traversalBuilder, newFlow1Shape, Keep.right)
+ .add(flow2.traversalBuilder, newFlow2Shape, combine),
+ BidiShape(newFlow1Shape.in, newFlow1Shape.out, newFlow2Shape.in, newFlow2Shape.out))
}
/**
@@ -262,8 +265,9 @@ object BidiFlow {
* }}}
*
*/
- def fromFlows[I1, O1, I2, O2, M1, M2](flow1: Graph[FlowShape[I1, O1], M1],
- flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
+ def fromFlows[I1, O1, I2, O2, M1, M2](
+ flow1: Graph[FlowShape[I1, O1], M1],
+ flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
fromFlowsMat(flow1, flow2)(Keep.none)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala
index 1c4a667f05..b9d2f7a6f9 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala
@@ -89,8 +89,9 @@ object FileIO {
* @param options File open options, see [[java.nio.file.StandardOpenOption]], defaults to Set(WRITE, TRUNCATE_EXISTING, CREATE)
*/
@deprecated("Use `toPath` instead", "2.4.5")
- def toFile(f: File,
- options: Set[OpenOption] = Set(WRITE, TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] =
+ def toFile(
+ f: File,
+ options: Set[OpenOption] = Set(WRITE, TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] =
toPath(f.toPath, options)
/**
@@ -112,8 +113,9 @@ object FileIO {
* @param f the file path to write to
* @param options File open options, see [[java.nio.file.StandardOpenOption]], defaults to Set(WRITE, TRUNCATE_EXISTING, CREATE)
*/
- def toPath(f: Path,
- options: Set[OpenOption] = Set(WRITE, TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] =
+ def toPath(
+ f: Path,
+ options: Set[OpenOption] = Set(WRITE, TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] =
toPath(f, options, startPosition = 0)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala
index 44b84f4d4a..c95fbddc34 100755
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala
@@ -37,8 +37,9 @@ import scala.reflect.ClassTag
/**
* A `Flow` is a set of stream processing steps that has one open input and one open output.
*/
-final class Flow[-In, +Out, +Mat](override val traversalBuilder: LinearTraversalBuilder,
- override val shape: FlowShape[In, Out])
+final class Flow[-In, +Out, +Mat](
+ override val traversalBuilder: LinearTraversalBuilder,
+ override val shape: FlowShape[In, Out])
extends FlowOpsMat[Out, Mat]
with Graph[FlowShape[In, Out], Mat] {
@@ -76,12 +77,14 @@ final class Flow[-In, +Out, +Mat](override val traversalBuilder: LinearTraversal
val useCombine =
if (combine == Keep.right) Keep.none
else combine
- new Flow(traversalBuilder.append(LinearTraversalBuilder.empty(), shape, useCombine),
- FlowShape[In, T](shape.in, flow.shape.out))
+ new Flow(
+ traversalBuilder.append(LinearTraversalBuilder.empty(), shape, useCombine),
+ FlowShape[In, T](shape.in, flow.shape.out))
}
} else {
- new Flow(traversalBuilder.append(flow.traversalBuilder, flow.shape, combine),
- FlowShape[In, T](shape.in, flow.shape.out))
+ new Flow(
+ traversalBuilder.append(flow.traversalBuilder, flow.shape, combine),
+ FlowShape[In, T](shape.in, flow.shape.out))
}
}
@@ -380,8 +383,9 @@ object Flow {
// behave as it is the operator with regards to attributes
val attrs = g.traversalBuilder.attributes
val noAttrStage = g.withAttributes(Attributes.none)
- new Flow(LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right),
- noAttrStage.shape).withAttributes(attrs)
+ new Flow(
+ LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right),
+ noAttrStage.shape).withAttributes(attrs)
case other => new Flow(LinearTraversalBuilder.fromBuilder(g.traversalBuilder, g.shape, Keep.right), g.shape)
}
@@ -504,8 +508,9 @@ object Flow {
*
* See also [[fromSinkAndSourceCoupledMat]] when access to materialized values of the parameters is needed.
*/
- def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _],
- source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] =
+ def fromSinkAndSourceCoupled[I, O](
+ sink: Graph[SinkShape[I], _],
+ source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] =
fromSinkAndSourceCoupledMat(sink, source)(Keep.none)
/**
@@ -558,8 +563,9 @@ object Flow {
* '''Cancels when''' downstream cancels
*/
@Deprecated
- @deprecated("Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)",
- "2.5.12")
+ @deprecated(
+ "Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)",
+ "2.5.12")
def lazyInit[I, O, M](flowFactory: I => Future[Flow[I, O, M]], fallback: () => M): Flow[I, O, M] =
Flow.fromGraph(new LazyFlow[I, O, M](flowFactory)).mapMaterializedValue(_ => fallback())
@@ -754,8 +760,9 @@ trait FlowOps[+Out, +Mat] {
* @param pf Receives the failure cause and returns the new Source to be materialized if any
*
*/
- def recoverWithRetries[T >: Out](attempts: Int,
- pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] =
+ def recoverWithRetries[T >: Out](
+ attempts: Int,
+ pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] =
via(new RecoverWith(attempts, pf))
/**
@@ -1852,9 +1859,10 @@ trait FlowOps[+Out, +Mat] {
* @param allowClosedSubstreamRecreation enables recreation of already closed substreams if elements with their
* corresponding keys arrive after completion
*/
- def groupBy[K](maxSubstreams: Int,
- f: Out => K,
- allowClosedSubstreamRecreation: Boolean): SubFlow[Out, Mat, Repr, Closed] = {
+ def groupBy[K](
+ maxSubstreams: Int,
+ f: Out => K,
+ allowClosedSubstreamRecreation: Boolean): SubFlow[Out, Mat, Repr, Closed] = {
val merge = new SubFlowImpl.MergeBack[Out, Repr] {
override def apply[T](flow: Flow[Out, T, NotUsed], breadth: Int): Repr[T] =
via(new GroupBy(maxSubstreams, f, allowClosedSubstreamRecreation))
@@ -2282,11 +2290,12 @@ trait FlowOps[+Out, +Mat] {
* '''Cancels when''' downstream cancels
*
*/
- def throttle(cost: Int,
- per: FiniteDuration,
- maximumBurst: Int,
- costCalculation: (Out) => Int,
- mode: ThrottleMode): Repr[Out] =
+ def throttle(
+ cost: Int,
+ per: FiniteDuration,
+ maximumBurst: Int,
+ costCalculation: (Out) => Int,
+ mode: ThrottleMode): Repr[Out] =
via(new Throttle(cost, per, maximumBurst, costCalculation, mode))
/**
@@ -2564,8 +2573,9 @@ trait FlowOps[+Out, +Mat] {
def merge[U >: Out, M](that: Graph[SourceShape[U], M], eagerComplete: Boolean = false): Repr[U] =
via(mergeGraph(that, eagerComplete))
- protected def mergeGraph[U >: Out, M](that: Graph[SourceShape[U], M],
- eagerComplete: Boolean): Graph[FlowShape[Out @uncheckedVariance, U], M] =
+ protected def mergeGraph[U >: Out, M](
+ that: Graph[SourceShape[U], M],
+ eagerComplete: Boolean): Graph[FlowShape[Out @uncheckedVariance, U], M] =
GraphDSL.create(that) { implicit b => r =>
val merge = b.add(Merge[U](2, eagerComplete))
r ~> merge.in(1)
@@ -2760,8 +2770,9 @@ trait FlowOps[+Out, +Mat] {
*/
def divertTo(that: Graph[SinkShape[Out], _], when: Out => Boolean): Repr[Out] = via(divertToGraph(that, when))
- protected def divertToGraph[M](that: Graph[SinkShape[Out], M],
- when: Out => Boolean): Graph[FlowShape[Out @uncheckedVariance, Out], M] =
+ protected def divertToGraph[M](
+ that: Graph[SinkShape[Out], M],
+ when: Out => Boolean): Graph[FlowShape[Out @uncheckedVariance, Out], M] =
GraphDSL.create(that) { implicit b => r =>
import GraphDSL.Implicits._
val partition = b.add(new Partition[Out](2, out => if (when(out)) 1 else 0, true))
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala
index 1cbcc63b6f..0896ef75aa 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala
@@ -33,9 +33,10 @@ object Framing {
* @param maximumFrameLength The maximum length of allowed frames while decoding. If the maximum length is
* exceeded this Flow will fail the stream.
*/
- def delimiter(delimiter: ByteString,
- maximumFrameLength: Int,
- allowTruncation: Boolean = false): Flow[ByteString, ByteString, NotUsed] =
+ def delimiter(
+ delimiter: ByteString,
+ maximumFrameLength: Int,
+ allowTruncation: Boolean = false): Flow[ByteString, ByteString, NotUsed] =
Flow[ByteString]
.via(new DelimiterFramingStage(delimiter, maximumFrameLength, allowTruncation))
.named("delimiterFraming")
@@ -54,10 +55,11 @@ object Framing {
* the length of the size field)
* @param byteOrder The ''ByteOrder'' to be used when decoding the field
*/
- def lengthField(fieldLength: Int,
- fieldOffset: Int = 0,
- maximumFrameLength: Int,
- byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = {
+ def lengthField(
+ fieldLength: Int,
+ fieldOffset: Int = 0,
+ maximumFrameLength: Int,
+ byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = {
require(fieldLength >= 1 && fieldLength <= 4, "Length field length must be 1, 2, 3 or 4.")
Flow[ByteString]
.via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder))
@@ -83,11 +85,12 @@ object Framing {
* ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise.
*
*/
- def lengthField(fieldLength: Int,
- fieldOffset: Int,
- maximumFrameLength: Int,
- byteOrder: ByteOrder,
- computeFrameSize: (Array[Byte], Int) => Int): Flow[ByteString, ByteString, NotUsed] = {
+ def lengthField(
+ fieldLength: Int,
+ fieldOffset: Int,
+ maximumFrameLength: Int,
+ byteOrder: ByteOrder,
+ computeFrameSize: (Array[Byte], Int) => Int): Flow[ByteString, ByteString, NotUsed] = {
require(fieldLength >= 1 && fieldLength <= 4, "Length field length must be 1, 2, 3 or 4.")
Flow[ByteString]
.via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder, Some(computeFrameSize)))
@@ -126,8 +129,9 @@ object Framing {
*/
def simpleFramingProtocol(
maximumMessageLength: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = {
- BidiFlow.fromFlowsMat(simpleFramingProtocolEncoder(maximumMessageLength),
- simpleFramingProtocolDecoder(maximumMessageLength))(Keep.left)
+ BidiFlow.fromFlowsMat(
+ simpleFramingProtocolEncoder(maximumMessageLength),
+ simpleFramingProtocolDecoder(maximumMessageLength))(Keep.left)
}
/**
@@ -192,9 +196,10 @@ object Framing {
}
}
- private class DelimiterFramingStage(val separatorBytes: ByteString,
- val maximumLineBytes: Int,
- val allowTruncation: Boolean)
+ private class DelimiterFramingStage(
+ val separatorBytes: ByteString,
+ val maximumLineBytes: Int,
+ val allowTruncation: Boolean)
extends GraphStage[FlowShape[ByteString, ByteString]] {
val in = Inlet[ByteString]("DelimiterFramingStage.in")
@@ -357,11 +362,12 @@ object Framing {
}
}
- private final class LengthFieldFramingStage(val lengthFieldLength: Int,
- val lengthFieldOffset: Int,
- val maximumFrameLength: Int,
- val byteOrder: ByteOrder,
- computeFrameSize: Option[(Array[Byte], Int) => Int])
+ private final class LengthFieldFramingStage(
+ val lengthFieldLength: Int,
+ val lengthFieldOffset: Int,
+ val maximumFrameLength: Int,
+ val byteOrder: ByteOrder,
+ computeFrameSize: Option[(Array[Byte], Int) => Int])
extends GraphStage[FlowShape[ByteString, ByteString]] {
//for the sake of binary compatibility
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala
index 4c17c2c914..c999d54b34 100755
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala
@@ -27,8 +27,9 @@ import scala.util.control.{ NoStackTrace, NonFatal }
*
* The implementation of a graph with an arbitrary shape.
*/
-private[stream] final class GenericGraph[S <: Shape, Mat](override val shape: S,
- override val traversalBuilder: TraversalBuilder)
+private[stream] final class GenericGraph[S <: Shape, Mat](
+ override val shape: S,
+ override val traversalBuilder: TraversalBuilder)
extends Graph[S, Mat] { outer =>
override def toString: String = s"GenericGraph($shape)"
@@ -129,31 +130,32 @@ final class Merge[T](val inputPorts: Int, val eagerComplete: Boolean) extends Gr
val i = in(ix)
ix += 1
- setHandler(i,
- new InHandler {
- override def onPush(): Unit = {
- if (isAvailable(out)) {
- // isAvailable(out) implies !pending
- // -> grab and push immediately
- push(out, grab(i))
- tryPull(i)
- } else pendingQueue.enqueue(i)
- }
+ setHandler(
+ i,
+ new InHandler {
+ override def onPush(): Unit = {
+ if (isAvailable(out)) {
+ // isAvailable(out) implies !pending
+ // -> grab and push immediately
+ push(out, grab(i))
+ tryPull(i)
+ } else pendingQueue.enqueue(i)
+ }
- override def onUpstreamFinish() =
- if (eagerComplete) {
- var ix2 = 0
- while (ix2 < in.size) {
- cancel(in(ix2))
- ix2 += 1
- }
- runningUpstreams = 0
- if (!pending) completeStage()
- } else {
- runningUpstreams -= 1
- if (upstreamsClosed && !pending) completeStage()
- }
- })
+ override def onUpstreamFinish() =
+ if (eagerComplete) {
+ var ix2 = 0
+ while (ix2 < in.size) {
+ cancel(in(ix2))
+ ix2 += 1
+ }
+ runningUpstreams = 0
+ if (!pending) completeStage()
+ } else {
+ runningUpstreams -= 1
+ if (upstreamsClosed && !pending) completeStage()
+ }
+ })
}
override def onPull(): Unit = {
@@ -243,49 +245,51 @@ final class MergePreferred[T](val secondaryPorts: Int, val eagerComplete: Boolea
val maxEmitting = 2
var preferredEmitting = 0
- setHandler(preferred,
- new InHandler {
- override def onUpstreamFinish(): Unit = onComplete()
- override def onPush(): Unit =
- if (preferredEmitting == maxEmitting) () // blocked
- else emitPreferred()
+ setHandler(
+ preferred,
+ new InHandler {
+ override def onUpstreamFinish(): Unit = onComplete()
+ override def onPush(): Unit =
+ if (preferredEmitting == maxEmitting) () // blocked
+ else emitPreferred()
- def emitPreferred(): Unit = {
- preferredEmitting += 1
- emit(out, grab(preferred), emitted)
- tryPull(preferred)
- }
+ def emitPreferred(): Unit = {
+ preferredEmitting += 1
+ emit(out, grab(preferred), emitted)
+ tryPull(preferred)
+ }
- val emitted = () => {
- preferredEmitting -= 1
- if (isAvailable(preferred)) emitPreferred()
- else if (preferredEmitting == 0) emitSecondary()
- }
+ val emitted = () => {
+ preferredEmitting -= 1
+ if (isAvailable(preferred)) emitPreferred()
+ else if (preferredEmitting == 0) emitSecondary()
+ }
- def emitSecondary(): Unit = {
- var i = 0
- while (i < secondaryPorts) {
- val port = in(i)
- if (isAvailable(port)) emit(out, grab(port), pullMe(i))
- i += 1
- }
- }
- })
+ def emitSecondary(): Unit = {
+ var i = 0
+ while (i < secondaryPorts) {
+ val port = in(i)
+ if (isAvailable(port)) emit(out, grab(port), pullMe(i))
+ i += 1
+ }
+ }
+ })
var i = 0
while (i < secondaryPorts) {
val port = in(i)
val pullPort = pullMe(i)
- setHandler(port,
- new InHandler {
- override def onPush(): Unit = {
- if (preferredEmitting > 0) () // blocked
- else {
- emit(out, grab(port), pullPort)
- }
- }
- override def onUpstreamFinish(): Unit = onComplete()
- })
+ setHandler(
+ port,
+ new InHandler {
+ override def onPush(): Unit = {
+ if (preferredEmitting > 0) () // blocked
+ else {
+ emit(out, grab(port), pullPort)
+ }
+ }
+ override def onUpstreamFinish(): Unit = onComplete()
+ })
i += 1
}
@@ -340,28 +344,29 @@ final class MergePrioritized[T] private (val priorities: Seq[Int], val eagerComp
in.zip(allBuffers).foreach {
case (inlet, buffer) =>
- setHandler(inlet,
- new InHandler {
- override def onPush(): Unit = {
- if (isAvailable(out) && !hasPending) {
- push(out, grab(inlet))
- tryPull(inlet)
- } else {
- buffer.enqueue(inlet)
- }
- }
+ setHandler(
+ inlet,
+ new InHandler {
+ override def onPush(): Unit = {
+ if (isAvailable(out) && !hasPending) {
+ push(out, grab(inlet))
+ tryPull(inlet)
+ } else {
+ buffer.enqueue(inlet)
+ }
+ }
- override def onUpstreamFinish(): Unit = {
- if (eagerComplete) {
- in.foreach(cancel)
- runningUpstreams = 0
- if (!hasPending) completeStage()
- } else {
- runningUpstreams -= 1
- if (upstreamsClosed && !hasPending) completeStage()
- }
- }
- })
+ override def onUpstreamFinish(): Unit = {
+ if (eagerComplete) {
+ in.foreach(cancel)
+ runningUpstreams = 0
+ if (!hasPending) completeStage()
+ } else {
+ runningUpstreams -= 1
+ if (upstreamsClosed && !hasPending) completeStage()
+ }
+ }
+ })
}
override def onPull(): Unit = {
@@ -420,9 +425,10 @@ object Interleave {
* @param segmentSize number of elements to send downstream before switching to next input port
* @param eagerClose if true, interleave completes upstream if any of its upstream completes.
*/
- def apply[T](inputPorts: Int,
- segmentSize: Int,
- eagerClose: Boolean = false): Graph[UniformFanInShape[T, T], NotUsed] =
+ def apply[T](
+ inputPorts: Int,
+ segmentSize: Int,
+ eagerClose: Boolean = false): Graph[UniformFanInShape[T, T], NotUsed] =
GraphStages.withDetachedInputs(new Interleave[T](inputPorts, segmentSize, eagerClose))
}
@@ -477,26 +483,27 @@ final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerCl
}
in.foreach { i =>
- setHandler(i,
- new InHandler {
- override def onPush(): Unit = {
- push(out, grab(i))
- counter += 1
- if (counter == segmentSize) switchToNextInput()
- }
+ setHandler(
+ i,
+ new InHandler {
+ override def onPush(): Unit = {
+ push(out, grab(i))
+ counter += 1
+ if (counter == segmentSize) switchToNextInput()
+ }
- override def onUpstreamFinish(): Unit = {
- if (!eagerClose) {
- runningUpstreams -= 1
- if (!upstreamsClosed) {
- if (i == currentUpstream) {
- switchToNextInput()
- if (isAvailable(out)) pull(currentUpstream)
- }
- } else completeStage()
- } else completeStage()
- }
- })
+ override def onUpstreamFinish(): Unit = {
+ if (!eagerClose) {
+ runningUpstreams -= 1
+ if (!upstreamsClosed) {
+ if (i == currentUpstream) {
+ switchToNextInput()
+ if (isAvailable(out)) pull(currentUpstream)
+ }
+ } else completeStage()
+ } else completeStage()
+ }
+ })
}
def onPull(): Unit =
@@ -627,27 +634,28 @@ final class Broadcast[T](val outputPorts: Int, val eagerCancel: Boolean) extends
while (idx < size) {
val o = out(idx)
val i = idx // close over val
- setHandler(o,
- new OutHandler {
- override def onPull(): Unit = {
- pending(i) = false
- pendingCount -= 1
- tryPull()
- }
+ setHandler(
+ o,
+ new OutHandler {
+ override def onPull(): Unit = {
+ pending(i) = false
+ pendingCount -= 1
+ tryPull()
+ }
- override def onDownstreamFinish() = {
- if (eagerCancel) completeStage()
- else {
- downstreamsRunning -= 1
- if (downstreamsRunning == 0) completeStage()
- else if (pending(i)) {
- pending(i) = false
- pendingCount -= 1
- tryPull()
- }
- }
- }
- })
+ override def onDownstreamFinish() = {
+ if (eagerCancel) completeStage()
+ else {
+ downstreamsRunning -= 1
+ if (downstreamsRunning == 0) completeStage()
+ else if (pending(i)) {
+ pending(i) = false
+ pendingCount -= 1
+ tryPull()
+ }
+ }
+ }
+ })
idx += 1
}
}
@@ -716,27 +724,28 @@ private[stream] final class WireTap[T] extends GraphStage[FanOutShape2[T, T, T]]
})
// The 'tap' output can neither backpressure, nor cancel, the stage.
- setHandler(outTap,
- new OutHandler {
- override def onPull() = {
- pendingTap match {
- case Some(elem) =>
- push(outTap, elem)
- pendingTap = None
- case None => // no pending element to emit
- }
- }
+ setHandler(
+ outTap,
+ new OutHandler {
+ override def onPull() = {
+ pendingTap match {
+ case Some(elem) =>
+ push(outTap, elem)
+ pendingTap = None
+ case None => // no pending element to emit
+ }
+ }
- override def onDownstreamFinish(): Unit = {
- setHandler(in, new InHandler {
- override def onPush() = {
- push(outMain, grab(in))
- }
- })
- // Allow any outstanding element to be garbage-collected
- pendingTap = None
- }
- })
+ override def onDownstreamFinish(): Unit = {
+ setHandler(in, new InHandler {
+ override def onPush() = {
+ push(outMain, grab(in))
+ }
+ })
+ // Allow any outstanding element to be garbage-collected
+ pendingTap = None
+ }
+ })
}
override def toString = "WireTap"
}
@@ -816,40 +825,41 @@ final class Partition[T](val outputPorts: Int, val partitioner: T => Int, val ea
out.iterator.zipWithIndex.foreach {
case (o, idx) =>
- setHandler(o,
- new OutHandler {
- override def onPull() = {
- if (outPendingElem != null) {
- val elem = outPendingElem.asInstanceOf[T]
- if (idx == outPendingIdx) {
- push(o, elem)
- outPendingElem = null
- if (!isClosed(in)) {
- if (!hasBeenPulled(in)) {
- pull(in)
- }
- } else
- completeStage()
- }
- } else if (!hasBeenPulled(in))
- pull(in)
- }
+ setHandler(
+ o,
+ new OutHandler {
+ override def onPull() = {
+ if (outPendingElem != null) {
+ val elem = outPendingElem.asInstanceOf[T]
+ if (idx == outPendingIdx) {
+ push(o, elem)
+ outPendingElem = null
+ if (!isClosed(in)) {
+ if (!hasBeenPulled(in)) {
+ pull(in)
+ }
+ } else
+ completeStage()
+ }
+ } else if (!hasBeenPulled(in))
+ pull(in)
+ }
- override def onDownstreamFinish(): Unit =
- if (eagerCancel) completeStage()
- else {
- downstreamRunning -= 1
- if (downstreamRunning == 0)
- completeStage()
- else if (outPendingElem != null) {
- if (idx == outPendingIdx) {
- outPendingElem = null
- if (!hasBeenPulled(in))
- pull(in)
- }
- }
- }
- })
+ override def onDownstreamFinish(): Unit =
+ if (eagerCancel) completeStage()
+ else {
+ downstreamRunning -= 1
+ if (downstreamRunning == 0)
+ completeStage()
+ else if (outPendingElem != null) {
+ if (idx == outPendingIdx) {
+ outPendingElem = null
+ if (!hasBeenPulled(in))
+ pull(in)
+ }
+ }
+ }
+ })
}
}
@@ -930,40 +940,41 @@ final class Balance[T](val outputPorts: Int, val waitForAllDownstreams: Boolean,
setHandler(in, this)
out.foreach { o =>
- setHandler(o,
- new OutHandler {
- private var hasPulled = false
+ setHandler(
+ o,
+ new OutHandler {
+ private var hasPulled = false
- override def onPull(): Unit = {
- if (!hasPulled) {
- hasPulled = true
- if (needDownstreamPulls > 0) needDownstreamPulls -= 1
- }
+ override def onPull(): Unit = {
+ if (!hasPulled) {
+ hasPulled = true
+ if (needDownstreamPulls > 0) needDownstreamPulls -= 1
+ }
- if (needDownstreamPulls == 0) {
- if (isAvailable(in)) {
- if (noPending) {
- push(o, grab(in))
- }
- } else {
- if (!hasBeenPulled(in)) pull(in)
- pendingQueue.enqueue(o)
- }
- } else pendingQueue.enqueue(o)
- }
+ if (needDownstreamPulls == 0) {
+ if (isAvailable(in)) {
+ if (noPending) {
+ push(o, grab(in))
+ }
+ } else {
+ if (!hasBeenPulled(in)) pull(in)
+ pendingQueue.enqueue(o)
+ }
+ } else pendingQueue.enqueue(o)
+ }
- override def onDownstreamFinish() = {
- if (eagerCancel) completeStage()
- else {
- downstreamsRunning -= 1
- if (downstreamsRunning == 0) completeStage()
- else if (!hasPulled && needDownstreamPulls > 0) {
- needDownstreamPulls -= 1
- if (needDownstreamPulls == 0 && !hasBeenPulled(in)) pull(in)
- }
- }
- }
- })
+ override def onDownstreamFinish() = {
+ if (eagerCancel) completeStage()
+ else {
+ downstreamsRunning -= 1
+ if (downstreamsRunning == 0) completeStage()
+ else if (!hasPulled && needDownstreamPulls > 0) {
+ needDownstreamPulls -= 1
+ if (needDownstreamPulls == 0 && !hasBeenPulled(in)) pull(in)
+ }
+ }
+ }
+ })
}
}
@@ -1246,22 +1257,23 @@ final class Concat[T](val inputPorts: Int) extends GraphStage[UniformFanInShape[
while (idxx < size) {
val i = in(idxx)
val idx = idxx // close over val
- setHandler(i,
- new InHandler {
- override def onPush() = {
- push(out, grab(i))
- }
+ setHandler(
+ i,
+ new InHandler {
+ override def onPush() = {
+ push(out, grab(i))
+ }
- override def onUpstreamFinish() = {
- if (idx == activeStream) {
- activeStream += 1
- // Skip closed inputs
- while (activeStream < inputPorts && isClosed(in(activeStream))) activeStream += 1
- if (activeStream == inputPorts) completeStage()
- else if (isAvailable(out)) pull(in(activeStream))
- }
- }
- })
+ override def onUpstreamFinish() = {
+ if (idx == activeStream) {
+ activeStream += 1
+ // Skip closed inputs
+ while (activeStream < inputPorts && isClosed(in(activeStream))) activeStream += 1
+ if (activeStream == inputPorts) completeStage()
+ else if (isAvailable(out)) pull(in(activeStream))
+ }
+ }
+ })
idxx += 1
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala
index 70dec972a1..1b1e56cd17 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala
@@ -272,9 +272,10 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int)
// Make some noise
override def onUpstreamFailure(ex: Throwable): Unit = {
- throw new MergeHub.ProducerFailed("Upstream producer failed with exception, " +
- "removing from MergeHub now",
- ex)
+ throw new MergeHub.ProducerFailed(
+ "Upstream producer failed with exception, " +
+ "removing from MergeHub now",
+ ex)
}
private def onDemand(moreDemand: Long): Unit = {
@@ -773,9 +774,10 @@ object PartitionHub {
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
* is backpressured.
*/
- @ApiMayChange def statefulSink[T](partitioner: () => (ConsumerInfo, T) => Long,
- startAfterNrOfConsumers: Int,
- bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] =
+ @ApiMayChange def statefulSink[T](
+ partitioner: () => (ConsumerInfo, T) => Long,
+ startAfterNrOfConsumers: Int,
+ bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] =
Sink.fromGraph(new PartitionHub[T](partitioner, startAfterNrOfConsumers, bufferSize))
/**
@@ -808,9 +810,10 @@ object PartitionHub {
* is backpressured.
*/
@ApiMayChange
- def sink[T](partitioner: (Int, T) => Int,
- startAfterNrOfConsumers: Int,
- bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] = {
+ def sink[T](
+ partitioner: (Int, T) => Int,
+ startAfterNrOfConsumers: Int,
+ bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] = {
val fun: (ConsumerInfo, T) => Long = { (info, elem) =>
val idx = partitioner(info.size, elem)
if (idx < 0) -1L
@@ -1006,9 +1009,10 @@ object PartitionHub {
/**
* INTERNAL API
*/
-@InternalApi private[akka] class PartitionHub[T](partitioner: () => (PartitionHub.ConsumerInfo, T) => Long,
- startAfterNrOfConsumers: Int,
- bufferSize: Int)
+@InternalApi private[akka] class PartitionHub[T](
+ partitioner: () => (PartitionHub.ConsumerInfo, T) => Long,
+ startAfterNrOfConsumers: Int,
+ bufferSize: Int)
extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] {
import PartitionHub.Internal._
import PartitionHub.ConsumerInfo
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala
index 12566f092a..7277ba77a3 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala
@@ -55,22 +55,23 @@ final class MergeLatest[T, M](val inputPorts: Int, val eagerClose: Boolean)(buil
in.zipWithIndex.foreach {
case (input, index) =>
- setHandler(input,
- new InHandler {
- override def onPush(): Unit = {
- messages.update(index, grab(input))
- activeStreams.add(index)
- if (allMessagesReady) emit(out, buildElem(messages.asInstanceOf[Array[T]]))
- tryPull(input)
- }
+ setHandler(
+ input,
+ new InHandler {
+ override def onPush(): Unit = {
+ messages.update(index, grab(input))
+ activeStreams.add(index)
+ if (allMessagesReady) emit(out, buildElem(messages.asInstanceOf[Array[T]]))
+ tryPull(input)
+ }
- override def onUpstreamFinish(): Unit = {
- if (!eagerClose) {
- runningUpstreams -= 1
- if (upstreamsClosed) completeStage()
- } else completeStage()
- }
- })
+ override def onUpstreamFinish(): Unit = {
+ if (!eagerClose) {
+ runningUpstreams -= 1
+ if (upstreamsClosed) completeStage()
+ } else completeStage()
+ }
+ })
}
override def onPull(): Unit = {
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala
index 21387abda5..c60ac14273 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala
@@ -51,12 +51,13 @@ object RestartFlow {
def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(
flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = {
Flow.fromGraph(
- new RestartWithBackoffFlow(flowFactory,
- minBackoff,
- maxBackoff,
- randomFactor,
- onlyOnFailures = false,
- Int.MaxValue))
+ new RestartWithBackoffFlow(
+ flowFactory,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ onlyOnFailures = false,
+ Int.MaxValue))
}
/**
@@ -84,17 +85,19 @@ object RestartFlow {
* Passing `0` will cause no restarts and a negative number will not cap the amount of restarts.
* @param flowFactory A factory for producing the [[Flow]] to wrap.
*/
- def withBackoff[In, Out](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = {
+ def withBackoff[In, Out](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = {
Flow.fromGraph(
- new RestartWithBackoffFlow(flowFactory,
- minBackoff,
- maxBackoff,
- randomFactor,
- onlyOnFailures = false,
- maxRestarts))
+ new RestartWithBackoffFlow(
+ flowFactory,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ onlyOnFailures = false,
+ maxRestarts))
}
/**
@@ -123,22 +126,24 @@ object RestartFlow {
* Passing `0` will cause no restarts and a negative number will not cap the amount of restarts.
* @param flowFactory A factory for producing the [[Flow]] to wrap.
*/
- def onFailuresWithBackoff[In, Out](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = {
+ def onFailuresWithBackoff[In, Out](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = {
Flow.fromGraph(
new RestartWithBackoffFlow(flowFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = true, maxRestarts))
}
}
-private final class RestartWithBackoffFlow[In, Out](flowFactory: () => Flow[In, Out, _],
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- onlyOnFailures: Boolean,
- maxRestarts: Int)
+private final class RestartWithBackoffFlow[In, Out](
+ flowFactory: () => Flow[In, Out, _],
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ onlyOnFailures: Boolean,
+ maxRestarts: Int)
extends GraphStage[FlowShape[In, Out]] { self =>
val in = Inlet[In]("RestartWithBackoffFlow.in")
@@ -200,13 +205,14 @@ private final class RestartWithBackoffFlow[In, Out](flowFactory: () => Flow[In,
/**
* Shared logic for all restart with backoff logics.
*/
-private abstract class RestartWithBackoffLogic[S <: Shape](name: String,
- shape: S,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- onlyOnFailures: Boolean,
- maxRestarts: Int)
+private abstract class RestartWithBackoffLogic[S <: Shape](
+ name: String,
+ shape: S,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ onlyOnFailures: Boolean,
+ maxRestarts: Int)
extends TimerGraphStageLogicWithLogging(shape) {
var restartCount = 0
var resetDeadline = minBackoff.fromNow
@@ -291,20 +297,21 @@ private abstract class RestartWithBackoffLogic[S <: Shape](name: String,
}
})
- setHandler(in,
- new InHandler {
- override def onPush() = if (sourceOut.isAvailable) {
- sourceOut.push(grab(in))
- }
- override def onUpstreamFinish() = {
- finishing = true
- sourceOut.complete()
- }
- override def onUpstreamFailure(ex: Throwable) = {
- finishing = true
- sourceOut.fail(ex)
- }
- })
+ setHandler(
+ in,
+ new InHandler {
+ override def onPush() = if (sourceOut.isAvailable) {
+ sourceOut.push(grab(in))
+ }
+ override def onUpstreamFinish() = {
+ finishing = true
+ sourceOut.complete()
+ }
+ override def onUpstreamFailure(ex: Throwable) = {
+ finishing = true
+ sourceOut.fail(ex)
+ }
+ })
sourceOut
}
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala
index 0912355707..d84db3aec0 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala
@@ -80,24 +80,26 @@ object RestartSink {
}
}
-private final class RestartWithBackoffSink[T](sinkFactory: () => Sink[T, _],
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int)
+private final class RestartWithBackoffSink[T](
+ sinkFactory: () => Sink[T, _],
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int)
extends GraphStage[SinkShape[T]] { self =>
val in = Inlet[T]("RestartWithBackoffSink.in")
override def shape = SinkShape(in)
override def createLogic(inheritedAttributes: Attributes) =
- new RestartWithBackoffLogic("Sink",
- shape,
- minBackoff,
- maxBackoff,
- randomFactor,
- onlyOnFailures = false,
- maxRestarts) {
+ new RestartWithBackoffLogic(
+ "Sink",
+ shape,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ onlyOnFailures = false,
+ maxRestarts) {
override protected def logSource = self.getClass
override protected def startGraph() = {
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala
index 81adf95a77..95e3290546 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala
@@ -42,12 +42,13 @@ object RestartSource {
def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(
sourceFactory: () => Source[T, _]): Source[T, NotUsed] = {
Source.fromGraph(
- new RestartWithBackoffSource(sourceFactory,
- minBackoff,
- maxBackoff,
- randomFactor,
- onlyOnFailures = false,
- Int.MaxValue))
+ new RestartWithBackoffSource(
+ sourceFactory,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ onlyOnFailures = false,
+ Int.MaxValue))
}
/**
@@ -76,12 +77,13 @@ object RestartSource {
def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)(
sourceFactory: () => Source[T, _]): Source[T, NotUsed] = {
Source.fromGraph(
- new RestartWithBackoffSource(sourceFactory,
- minBackoff,
- maxBackoff,
- randomFactor,
- onlyOnFailures = false,
- maxRestarts))
+ new RestartWithBackoffSource(
+ sourceFactory,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ onlyOnFailures = false,
+ maxRestarts))
}
/**
@@ -107,12 +109,13 @@ object RestartSource {
def onFailuresWithBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(
sourceFactory: () => Source[T, _]): Source[T, NotUsed] = {
Source.fromGraph(
- new RestartWithBackoffSource(sourceFactory,
- minBackoff,
- maxBackoff,
- randomFactor,
- onlyOnFailures = true,
- Int.MaxValue))
+ new RestartWithBackoffSource(
+ sourceFactory,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ onlyOnFailures = true,
+ Int.MaxValue))
}
/**
@@ -138,26 +141,29 @@ object RestartSource {
* @param sourceFactory A factory for producing the [[Source]] to wrap.
*
*/
- def onFailuresWithBackoff[T](minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- maxRestarts: Int)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = {
+ def onFailuresWithBackoff[T](
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ maxRestarts: Int)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = {
Source.fromGraph(
- new RestartWithBackoffSource(sourceFactory,
- minBackoff,
- maxBackoff,
- randomFactor,
- onlyOnFailures = true,
- maxRestarts))
+ new RestartWithBackoffSource(
+ sourceFactory,
+ minBackoff,
+ maxBackoff,
+ randomFactor,
+ onlyOnFailures = true,
+ maxRestarts))
}
}
-private final class RestartWithBackoffSource[T](sourceFactory: () => Source[T, _],
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- onlyOnFailures: Boolean,
- maxRestarts: Int)
+private final class RestartWithBackoffSource[T](
+ sourceFactory: () => Source[T, _],
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ onlyOnFailures: Boolean,
+ maxRestarts: Int)
extends GraphStage[SourceShape[T]] { self =>
val out = Outlet[T]("RestartWithBackoffSource.out")
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala
index 70e9b355ca..93a282f9f8 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala
@@ -137,8 +137,9 @@ object Sink {
// behave as it is the stage with regards to attributes
val attrs = g.traversalBuilder.attributes
val noAttrStage = g.withAttributes(Attributes.none)
- new Sink(LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right),
- noAttrStage.shape).withAttributes(attrs)
+ new Sink(
+ LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right),
+ noAttrStage.shape).withAttributes(attrs)
case other =>
new Sink(LinearTraversalBuilder.fromBuilder(other.traversalBuilder, other.shape, Keep.right), other.shape)
@@ -427,9 +428,10 @@ object Sink {
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
* limiting operator in front of this `Sink`.
*/
- @InternalApi private[akka] def actorRef[T](ref: ActorRef,
- onCompleteMessage: Any,
- onFailureMessage: Throwable => Any): Sink[T, NotUsed] =
+ @InternalApi private[akka] def actorRef[T](
+ ref: ActorRef,
+ onCompleteMessage: Any,
+ onFailureMessage: Throwable => Any): Sink[T, NotUsed] =
fromGraph(
new ActorRefSink(ref, onCompleteMessage, onFailureMessage, DefaultAttributes.actorRefSink, shape("ActorRefSink")))
@@ -450,11 +452,12 @@ object Sink {
*/
def actorRef[T](ref: ActorRef, onCompleteMessage: Any): Sink[T, NotUsed] =
fromGraph(
- new ActorRefSink(ref,
- onCompleteMessage,
- t => Status.Failure(t),
- DefaultAttributes.actorRefSink,
- shape("ActorRefSink")))
+ new ActorRefSink(
+ ref,
+ onCompleteMessage,
+ t => Status.Failure(t),
+ DefaultAttributes.actorRefSink,
+ shape("ActorRefSink")))
/**
* INTERNAL API
@@ -476,19 +479,21 @@ object Sink {
* When the stream is completed with failure - result of `onFailureMessage(throwable)`
* function will be sent to the destination actor.
*/
- @InternalApi private[akka] def actorRefWithAck[T](ref: ActorRef,
- messageAdapter: ActorRef => T => Any,
- onInitMessage: ActorRef => Any,
- ackMessage: Any,
- onCompleteMessage: Any,
- onFailureMessage: (Throwable) => Any): Sink[T, NotUsed] =
+ @InternalApi private[akka] def actorRefWithAck[T](
+ ref: ActorRef,
+ messageAdapter: ActorRef => T => Any,
+ onInitMessage: ActorRef => Any,
+ ackMessage: Any,
+ onCompleteMessage: Any,
+ onFailureMessage: (Throwable) => Any): Sink[T, NotUsed] =
Sink.fromGraph(
- new ActorRefBackpressureSinkStage(ref,
- messageAdapter,
- onInitMessage,
- ackMessage,
- onCompleteMessage,
- onFailureMessage))
+ new ActorRefBackpressureSinkStage(
+ ref,
+ messageAdapter,
+ onInitMessage,
+ ackMessage,
+ onCompleteMessage,
+ onFailureMessage))
/**
* Sends the elements of the stream to the given `ActorRef` that sends back back-pressure signal.
@@ -504,11 +509,12 @@ object Sink {
* function will be sent to the destination actor.
*
*/
- def actorRefWithAck[T](ref: ActorRef,
- onInitMessage: Any,
- ackMessage: Any,
- onCompleteMessage: Any,
- onFailureMessage: (Throwable) => Any = Status.Failure): Sink[T, NotUsed] =
+ def actorRefWithAck[T](
+ ref: ActorRef,
+ onInitMessage: Any,
+ ackMessage: Any,
+ onCompleteMessage: Any,
+ onFailureMessage: (Throwable) => Any = Status.Failure): Sink[T, NotUsed] =
actorRefWithAck(ref, _ => identity, _ => onInitMessage, ackMessage, onCompleteMessage, onFailureMessage)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala
index 9003a7b524..d460a8a567 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala
@@ -32,8 +32,9 @@ import scala.compat.java8.FutureConverters._
* an “atomic” source, e.g. from a collection or a file. Materialization turns a Source into
* a Reactive Streams `Publisher` (at least conceptually).
*/
-final class Source[+Out, +Mat](override val traversalBuilder: LinearTraversalBuilder,
- override val shape: SourceShape[Out])
+final class Source[+Out, +Mat](
+ override val traversalBuilder: LinearTraversalBuilder,
+ override val shape: SourceShape[Out])
extends FlowOpsMat[Out, Mat]
with Graph[SourceShape[Out], Mat] {
@@ -55,14 +56,17 @@ final class Source[+Out, +Mat](override val traversalBuilder: LinearTraversalBui
this.asInstanceOf[Source[T, Mat3]] //Mat == Mat3, due to Keep.left
else if (combine == Keep.right || combine == Keep.none) // Mat3 = NotUsed
//optimization with LinearTraversalBuilder.empty()
- new Source[T, Mat3](traversalBuilder.append(LinearTraversalBuilder.empty(), flow.shape, combine),
- SourceShape(shape.out).asInstanceOf[SourceShape[T]])
+ new Source[T, Mat3](
+ traversalBuilder.append(LinearTraversalBuilder.empty(), flow.shape, combine),
+ SourceShape(shape.out).asInstanceOf[SourceShape[T]])
else
- new Source[T, Mat3](traversalBuilder.append(flow.traversalBuilder, flow.shape, combine),
- SourceShape(flow.shape.out))
+ new Source[T, Mat3](
+ traversalBuilder.append(flow.traversalBuilder, flow.shape, combine),
+ SourceShape(flow.shape.out))
else
- new Source[T, Mat3](traversalBuilder.append(flow.traversalBuilder, flow.shape, combine),
- SourceShape(flow.shape.out))
+ new Source[T, Mat3](
+ traversalBuilder.append(flow.traversalBuilder, flow.shape, combine),
+ SourceShape(flow.shape.out))
}
/**
@@ -283,8 +287,9 @@ object Source {
// behave as it is the stage with regards to attributes
val attrs = g.traversalBuilder.attributes
val noAttrStage = g.withAttributes(Attributes.none)
- new Source(LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right),
- noAttrStage.shape).withAttributes(attrs)
+ new Source(
+ LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right),
+ noAttrStage.shape).withAttributes(attrs)
case other =>
// composite source shaped graph
new Source(LinearTraversalBuilder.fromBuilder(other.traversalBuilder, other.shape, Keep.right), other.shape)
@@ -502,19 +507,21 @@ object Source {
* @param bufferSize The size of the buffer in element count
* @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer
*/
- @InternalApi private[akka] def actorRef[T](completionMatcher: PartialFunction[Any, Unit],
- failureMatcher: PartialFunction[Any, Throwable],
- bufferSize: Int,
- overflowStrategy: OverflowStrategy): Source[T, ActorRef] = {
+ @InternalApi private[akka] def actorRef[T](
+ completionMatcher: PartialFunction[Any, Unit],
+ failureMatcher: PartialFunction[Any, Throwable],
+ bufferSize: Int,
+ overflowStrategy: OverflowStrategy): Source[T, ActorRef] = {
require(bufferSize >= 0, "bufferSize must be greater than or equal to 0")
require(!overflowStrategy.isBackpressure, "Backpressure overflowStrategy not supported")
fromGraph(
- new ActorRefSource(completionMatcher,
- failureMatcher,
- bufferSize,
- overflowStrategy,
- DefaultAttributes.actorRefSource,
- shape("ActorRefSource")))
+ new ActorRefSource(
+ completionMatcher,
+ failureMatcher,
+ bufferSize,
+ overflowStrategy,
+ DefaultAttributes.actorRefSource,
+ shape("ActorRefSource")))
}
/**
@@ -691,9 +698,10 @@ object Source {
* is received. Stream calls close and completes when `Future` from read function returns None.
* @param close - function that closes resource
*/
- def unfoldResourceAsync[T, S](create: () => Future[S],
- read: (S) => Future[Option[T]],
- close: (S) => Future[Done]): Source[T, NotUsed] =
+ def unfoldResourceAsync[T, S](
+ create: () => Future[S],
+ read: (S) => Future[Option[T]],
+ close: (S) => Future[Done]): Source[T, NotUsed] =
Source.fromGraph(new UnfoldResourceSourceAsync(create, read, close))
}
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala
index 22fea0e9b9..bc29f7abfd 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala
@@ -71,12 +71,13 @@ object TLS {
* The SSLEngine may use this information e.g. when an endpoint identification algorithm was
* configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]].
*/
- def apply(sslContext: SSLContext,
- sslConfig: Option[AkkaSSLConfig],
- firstSession: NegotiateNewSession,
- role: TLSRole,
- closing: TLSClosing = IgnoreComplete,
- hostInfo: Option[(String, Int)] = None)
+ def apply(
+ sslContext: SSLContext,
+ sslConfig: Option[AkkaSSLConfig],
+ firstSession: NegotiateNewSession,
+ role: TLSRole,
+ closing: TLSClosing = IgnoreComplete,
+ hostInfo: Option[(String, Int)] = None)
: scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = {
def theSslConfig(system: ActorSystem): AkkaSSLConfig =
sslConfig.getOrElse(AkkaSSLConfig(system))
@@ -139,11 +140,12 @@ object TLS {
* The SSLEngine may use this information e.g. when an endpoint identification algorithm was
* configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]].
*/
- def apply(sslContext: SSLContext,
- firstSession: NegotiateNewSession,
- role: TLSRole,
- closing: TLSClosing,
- hostInfo: Option[(String, Int)])
+ def apply(
+ sslContext: SSLContext,
+ firstSession: NegotiateNewSession,
+ role: TLSRole,
+ closing: TLSClosing,
+ hostInfo: Option[(String, Int)])
: scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
apply(sslContext, None, firstSession, role, closing, hostInfo)
@@ -156,9 +158,10 @@ object TLS {
* that is not a requirement and depends entirely on the application
* protocol.
*/
- def apply(sslContext: SSLContext,
- firstSession: NegotiateNewSession,
- role: TLSRole): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def apply(
+ sslContext: SSLContext,
+ firstSession: NegotiateNewSession,
+ role: TLSRole): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
apply(sslContext, None, firstSession, role, IgnoreComplete, None)
/**
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala
index 5c4ec0d10c..a458623317 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala
@@ -47,9 +47,10 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider {
/**
* Represents an accepted incoming TCP connection.
*/
- final case class IncomingConnection(localAddress: InetSocketAddress,
- remoteAddress: InetSocketAddress,
- flow: Flow[ByteString, ByteString, NotUsed]) {
+ final case class IncomingConnection(
+ localAddress: InetSocketAddress,
+ remoteAddress: InetSocketAddress,
+ flow: Flow[ByteString, ByteString, NotUsed]) {
/**
* Handles the connection using the given flow, which is materialized exactly once and the respective
@@ -112,21 +113,23 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* independently whether the client is still attempting to write. This setting is recommended
* for servers, and therefore it is the default setting.
*/
- def bind(interface: String,
- port: Int,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- halfClose: Boolean = false,
- idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] =
+ def bind(
+ interface: String,
+ port: Int,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ halfClose: Boolean = false,
+ idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] =
Source.fromGraph(
- new ConnectionSourceStage(IO(IoTcp)(system),
- new InetSocketAddress(interface, port),
- backlog,
- options,
- halfClose,
- idleTimeout,
- bindShutdownTimeout,
- settings.ioSettings))
+ new ConnectionSourceStage(
+ IO(IoTcp)(system),
+ new InetSocketAddress(interface, port),
+ backlog,
+ options,
+ halfClose,
+ idleTimeout,
+ bindShutdownTimeout,
+ settings.ioSettings))
/**
* Creates a [[Tcp.ServerBinding]] instance which represents a prospective TCP server binding on the given `endpoint`
@@ -151,13 +154,14 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* independently whether the client is still attempting to write. This setting is recommended
* for servers, and therefore it is the default setting.
*/
- def bindAndHandle(handler: Flow[ByteString, ByteString, _],
- interface: String,
- port: Int,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- halfClose: Boolean = false,
- idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = {
+ def bindAndHandle(
+ handler: Flow[ByteString, ByteString, _],
+ interface: String,
+ port: Int,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ halfClose: Boolean = false,
+ idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = {
bind(interface, port, backlog, options, halfClose, idleTimeout)
.to(Sink.foreach { conn: IncomingConnection =>
conn.flow.join(handler).run()
@@ -195,13 +199,14 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
val tcpFlow = Flow
.fromGraph(
- new OutgoingConnectionStage(IO(IoTcp)(system),
- remoteAddress,
- localAddress,
- options,
- halfClose,
- connectTimeout,
- settings.ioSettings))
+ new OutgoingConnectionStage(
+ IO(IoTcp)(system),
+ remoteAddress,
+ localAddress,
+ options,
+ halfClose,
+ connectTimeout,
+ settings.ioSettings))
.via(detacher[ByteString]) // must read ahead for proper completions
idleTimeout match {
@@ -296,13 +301,14 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* Marked API-may-change to leave room for an improvement around the very long parameter list.
*/
@ApiMayChange
- def bindTls(interface: String,
- port: Int,
- sslContext: SSLContext,
- negotiateNewSession: NegotiateNewSession,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = {
+ def bindTls(
+ interface: String,
+ port: Int,
+ sslContext: SSLContext,
+ negotiateNewSession: NegotiateNewSession,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = {
val tls = tlsWrapping.atop(TLS(sslContext, negotiateNewSession, TLSRole.server)).reversed
@@ -342,14 +348,15 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* Marked API-may-change to leave room for an improvement around the very long parameter list.
*/
@ApiMayChange
- def bindAndHandleTls(handler: Flow[ByteString, ByteString, _],
- interface: String,
- port: Int,
- sslContext: SSLContext,
- negotiateNewSession: NegotiateNewSession,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = {
+ def bindAndHandleTls(
+ handler: Flow[ByteString, ByteString, _],
+ interface: String,
+ port: Int,
+ sslContext: SSLContext,
+ negotiateNewSession: NegotiateNewSession,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = {
bindTls(interface, port, sslContext, negotiateNewSession, backlog, options, idleTimeout)
.to(Sink.foreach { conn: IncomingConnection =>
conn.handleWith(handler)
diff --git a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala
index 138e067740..8bfe797d16 100644
--- a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala
+++ b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala
@@ -156,9 +156,10 @@ sealed trait ConnectionSnapshot {
* INTERNAL API
*/
@InternalApi
-final private[akka] case class StreamSnapshotImpl(self: ActorPath,
- activeInterpreters: Seq[RunningInterpreter],
- newShells: Seq[UninitializedInterpreter])
+final private[akka] case class StreamSnapshotImpl(
+ self: ActorPath,
+ activeInterpreters: Seq[RunningInterpreter],
+ newShells: Seq[UninitializedInterpreter])
extends StreamSnapshot
with HideImpl
@@ -173,11 +174,12 @@ private[akka] final case class UninitializedInterpreterImpl(logics: immutable.Se
* INTERNAL API
*/
@InternalApi
-private[akka] final case class RunningInterpreterImpl(logics: immutable.Seq[LogicSnapshot],
- connections: immutable.Seq[ConnectionSnapshot],
- queueStatus: String,
- runningLogicsCount: Int,
- stoppedLogics: immutable.Seq[LogicSnapshot])
+private[akka] final case class RunningInterpreterImpl(
+ logics: immutable.Seq[LogicSnapshot],
+ connections: immutable.Seq[ConnectionSnapshot],
+ queueStatus: String,
+ runningLogicsCount: Int,
+ stoppedLogics: immutable.Seq[LogicSnapshot])
extends RunningInterpreter
with HideImpl
@@ -193,10 +195,11 @@ private[akka] final case class LogicSnapshotImpl(index: Int, label: String, attr
* INTERNAL API
*/
@InternalApi
-private[akka] final case class ConnectionSnapshotImpl(id: Int,
- in: LogicSnapshot,
- out: LogicSnapshot,
- state: ConnectionSnapshot.ConnectionState)
+private[akka] final case class ConnectionSnapshotImpl(
+ id: Int,
+ in: LogicSnapshot,
+ out: LogicSnapshot,
+ state: ConnectionSnapshot.ConnectionState)
extends ConnectionSnapshot
with HideImpl
diff --git a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala
index 8d58ddf0f1..f8007e745c 100644
--- a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala
@@ -180,15 +180,17 @@ object GraphStageLogic {
*
* @param name leave empty to use plain auto generated names
*/
- final class StageActor(materializer: ActorMaterializer,
- getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)],
- initialReceive: StageActorRef.Receive,
- name: String) {
+ final class StageActor(
+ materializer: ActorMaterializer,
+ getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)],
+ initialReceive: StageActorRef.Receive,
+ name: String) {
// not really needed, but let's keep MiMa happy
- def this(materializer: akka.stream.ActorMaterializer,
- getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)],
- initialReceive: StageActorRef.Receive) {
+ def this(
+ materializer: akka.stream.ActorMaterializer,
+ getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)],
+ initialReceive: StageActorRef.Receive) {
this(materializer, getAsyncCallback, initialReceive, "")
}
@@ -693,10 +695,11 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
* for the given inlet if suspension is needed and reinstalls the current
* handler upon receiving the last `onPush()` signal (before invoking the `andThen` function).
*/
- final protected def readN[T](in: Inlet[T],
- n: Int,
- andThen: Procedure[java.util.List[T]],
- onClose: Procedure[java.util.List[T]]): Unit = {
+ final protected def readN[T](
+ in: Inlet[T],
+ n: Int,
+ andThen: Procedure[java.util.List[T]],
+ onClose: Procedure[java.util.List[T]]): Unit = {
//FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity
import collection.JavaConverters._
readN(in, n)(seq => andThen(seq.asJava), seq => onClose(seq.asJava))
@@ -751,8 +754,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
* Caution: for n == 1 andThen is called after resetting the handler, for
* other values it is called without resetting the handler. n MUST be positive.
*/
- private final class Reading[T](in: Inlet[T], private var n: Int, val previous: InHandler)(andThen: T => Unit,
- onComplete: () => Unit)
+ private final class Reading[T](in: Inlet[T], private var n: Int, val previous: InHandler)(
+ andThen: T => Unit,
+ onComplete: () => Unit)
extends InHandler {
require(n > 0, "number of elements to read must be positive!")
@@ -1002,11 +1006,12 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
* completion or failure of the given inlet shall lead to operator termination or not.
* `doPull` instructs to perform one initial pull on the `from` port.
*/
- final protected def passAlong[Out, In <: Out](from: Inlet[In],
- to: Outlet[Out],
- doFinish: Boolean = true,
- doFail: Boolean = true,
- doPull: Boolean = false): Unit = {
+ final protected def passAlong[Out, In <: Out](
+ from: Inlet[In],
+ to: Outlet[Out],
+ doFinish: Boolean = true,
+ doFail: Boolean = true,
+ doPull: Boolean = false): Unit = {
class PassAlongHandler extends InHandler with (() => Unit) {
override def apply(): Unit = tryPull(from)
@@ -1521,9 +1526,10 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap
* Any existing timer with the same key will automatically be canceled before
* adding the new timer.
*/
- final protected def schedulePeriodicallyWithInitialDelay(timerKey: Any,
- initialDelay: FiniteDuration,
- interval: FiniteDuration): Unit = {
+ final protected def schedulePeriodicallyWithInitialDelay(
+ timerKey: Any,
+ initialDelay: FiniteDuration,
+ interval: FiniteDuration): Unit = {
cancelTimer(timerKey)
val id = timerIdGen.next()
val task = interpreter.materializer.schedulePeriodically(initialDelay, interval, new Runnable {
@@ -1538,9 +1544,10 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap
* Any existing timer with the same key will automatically be canceled before
* adding the new timer.
*/
- final protected def schedulePeriodicallyWithInitialDelay(timerKey: Any,
- initialDelay: java.time.Duration,
- interval: java.time.Duration): Unit = {
+ final protected def schedulePeriodicallyWithInitialDelay(
+ timerKey: Any,
+ initialDelay: java.time.Duration,
+ interval: java.time.Duration): Unit = {
import akka.util.JavaDurationConverters._
schedulePeriodicallyWithInitialDelay(timerKey, initialDelay.asScala, interval.asScala)
}
diff --git a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala
index 35e9dd3383..5ce3cb5a9e 100644
--- a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala
+++ b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala
@@ -16,9 +16,10 @@ trait SSLEngineConfigurator {
def configure(engine: SSLEngine, sslContext: SSLContext): SSLEngine
}
-final class DefaultSSLEngineConfigurator(config: SSLConfigSettings,
- enabledProtocols: Array[String],
- enabledCipherSuites: Array[String])
+final class DefaultSSLEngineConfigurator(
+ config: SSLConfigSettings,
+ enabledProtocols: Array[String],
+ enabledCipherSuites: Array[String])
extends SSLEngineConfigurator {
def configure(engine: SSLEngine, sslContext: SSLContext): SSLEngine = {
engine.setSSLParameters(sslContext.getDefaultSSLParameters)
diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
index 21660a7838..dafa5d8737 100644
--- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
@@ -164,9 +164,10 @@ class CallingThreadDispatcher(_configurator: MessageDispatcherConfigurator) exte
protected[akka] override def throughput = 0
protected[akka] override def throughputDeadlineTime = Duration.Zero
- protected[akka] override def registerForExecution(mbox: Mailbox,
- hasMessageHint: Boolean,
- hasSystemMessageHint: Boolean): Boolean = false
+ protected[akka] override def registerForExecution(
+ mbox: Mailbox,
+ hasMessageHint: Boolean,
+ hasSystemMessageHint: Boolean): Boolean = false
protected[akka] override def shutdownTimeout = 1 second
@@ -245,9 +246,10 @@ class CallingThreadDispatcher(_configurator: MessageDispatcherConfigurator) exte
* it is suspendSwitch and resumed.
*/
@tailrec
- private def runQueue(mbox: CallingThreadMailbox,
- queue: MessageQueue,
- interruptedEx: InterruptedException = null): Unit = {
+ private def runQueue(
+ mbox: CallingThreadMailbox,
+ queue: MessageQueue,
+ interruptedEx: InterruptedException = null): Unit = {
def checkThreadInterruption(intEx: InterruptedException): InterruptedException = {
if (Thread.interrupted()) { // clear interrupted flag before we continue, exception will be thrown later
val ie = new InterruptedException("Interrupted during message processing")
diff --git a/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala b/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala
index 98beee3e82..e20e068d20 100644
--- a/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala
@@ -89,9 +89,10 @@ class ExplicitlyTriggeredScheduler(@unused config: Config, log: LoggingAdapter,
}
}
- private def schedule(initialDelay: FiniteDuration,
- interval: Option[FiniteDuration],
- runnable: Runnable): Cancellable = {
+ private def schedule(
+ initialDelay: FiniteDuration,
+ interval: Option[FiniteDuration],
+ runnable: Runnable): Cancellable = {
val firstTime = currentTime.get + initialDelay.toMillis
val item = Item(firstTime, interval, runnable)
log.debug("Scheduled item for {}: {}", firstTime, item)
diff --git a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala
index 173409f1a8..b8e23e4e67 100644
--- a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala
@@ -73,9 +73,10 @@ object SocketUtil {
def temporaryServerAddress(address: String = RANDOM_LOOPBACK_ADDRESS, udp: Boolean = false): InetSocketAddress =
temporaryServerAddresses(1, address, udp).head
- def temporaryServerAddresses(numberOfAddresses: Int,
- hostname: String = RANDOM_LOOPBACK_ADDRESS,
- udp: Boolean = false): immutable.IndexedSeq[InetSocketAddress] = {
+ def temporaryServerAddresses(
+ numberOfAddresses: Int,
+ hostname: String = RANDOM_LOOPBACK_ADDRESS,
+ udp: Boolean = false): immutable.IndexedSeq[InetSocketAddress] = {
Vector
.fill(numberOfAddresses) {
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
index d8ce9b1710..acf6143f59 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
@@ -39,27 +39,30 @@ class TestActorRef[T <: Actor](_system: ActorSystem, _props: Props, _supervisor:
o.getClass)
}
case s =>
- _system.log.error("trying to attach child {} to unknown type of supervisor {}, this is not going to end well",
- name,
- s.getClass)
+ _system.log.error(
+ "trying to attach child {} to unknown type of supervisor {}, this is not going to end well",
+ name,
+ s.getClass)
}
-} with LocalActorRef(_system.asInstanceOf[ActorSystemImpl],
- props,
- dispatcher,
- _system.mailboxes.getMailboxType(props, dispatcher.configurator.config),
- _supervisor.asInstanceOf[InternalActorRef],
- _supervisor.path / name) {
+} with LocalActorRef(
+ _system.asInstanceOf[ActorSystemImpl],
+ props,
+ dispatcher,
+ _system.mailboxes.getMailboxType(props, dispatcher.configurator.config),
+ _supervisor.asInstanceOf[InternalActorRef],
+ _supervisor.path / name) {
// we need to start ourselves since the creation of an actor has been split into initialization and starting
underlying.start()
import TestActorRef.InternalGetActor
- protected override def newActorCell(system: ActorSystemImpl,
- ref: InternalActorRef,
- props: Props,
- dispatcher: MessageDispatcher,
- supervisor: InternalActorRef): ActorCell =
+ protected override def newActorCell(
+ system: ActorSystemImpl,
+ ref: InternalActorRef,
+ props: Props,
+ dispatcher: MessageDispatcher,
+ supervisor: InternalActorRef): ActorCell =
new ActorCell(system, ref, props, dispatcher, supervisor) {
override def autoReceiveMessage(msg: Envelope): Unit = {
msg.message match {
@@ -162,12 +165,13 @@ object TestActorRef {
private def dynamicCreateRecover[U]: PartialFunction[Throwable, U] = {
case exception =>
- throw ActorInitializationException(null,
- "Could not instantiate Actor" +
- "\nMake sure Actor is NOT defined inside a class/trait," +
- "\nif so put it outside the class/trait, f.e. in a companion object," +
- "\nOR try to change: 'actorOf(Props[MyActor]' to 'actorOf(Props(new MyActor)'.",
- exception)
+ throw ActorInitializationException(
+ null,
+ "Could not instantiate Actor" +
+ "\nMake sure Actor is NOT defined inside a class/trait," +
+ "\nif so put it outside the class/trait, f.e. in a companion object," +
+ "\nOR try to change: 'actorOf(Props[MyActor]' to 'actorOf(Props(new MyActor)'.",
+ exception)
}
def apply[T <: Actor](name: String)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] =
@@ -190,8 +194,9 @@ object TestActorRef {
.get
}), supervisor)
- def apply[T <: Actor](supervisor: ActorRef, name: String)(implicit t: ClassTag[T],
- system: ActorSystem): TestActorRef[T] =
+ def apply[T <: Actor](supervisor: ActorRef, name: String)(
+ implicit t: ClassTag[T],
+ system: ActorSystem): TestActorRef[T] =
apply[T](
Props({
system
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala
index f6ba42410e..8b038aaf20 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala
@@ -100,9 +100,10 @@ abstract class EventFilter(occurrences: Int) {
* `occurrences` parameter specifies.
*/
def assertDone(max: Duration): Unit =
- assert(awaitDone(max),
- if (todo > 0) s"$todo messages outstanding on $this"
- else s"received ${-todo} excess messages on $this")
+ assert(
+ awaitDone(max),
+ if (todo > 0) s"$todo messages outstanding on $this"
+ else s"received ${-todo} excess messages on $this")
/**
* Apply this filter while executing the given code block. Care is taken to
@@ -176,28 +177,32 @@ object EventFilter {
* `null` does NOT work (passing `null` disables the
* source filter).''
*/
- def apply[A <: Throwable: ClassTag](message: String = null,
- source: String = null,
- start: String = "",
- pattern: String = null,
- occurrences: Int = Int.MaxValue): EventFilter =
- ErrorFilter(implicitly[ClassTag[A]].runtimeClass,
- Option(source),
- if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
- message ne null)(occurrences)
+ def apply[A <: Throwable: ClassTag](
+ message: String = null,
+ source: String = null,
+ start: String = "",
+ pattern: String = null,
+ occurrences: Int = Int.MaxValue): EventFilter =
+ ErrorFilter(
+ implicitly[ClassTag[A]].runtimeClass,
+ Option(source),
+ if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
+ message ne null)(occurrences)
/**
* Create a filter for Error events. See apply() for more details.
*/
- def error(message: String = null,
- source: String = null,
- start: String = "",
- pattern: String = null,
- occurrences: Int = Int.MaxValue): EventFilter =
- ErrorFilter(Logging.Error.NoCause.getClass,
- Option(source),
- if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
- message ne null)(occurrences)
+ def error(
+ message: String = null,
+ source: String = null,
+ start: String = "",
+ pattern: String = null,
+ occurrences: Int = Int.MaxValue): EventFilter =
+ ErrorFilter(
+ Logging.Error.NoCause.getClass,
+ Option(source),
+ if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
+ message ne null)(occurrences)
/**
* Create a filter for Warning events. Give up to one of start and pattern:
@@ -213,14 +218,16 @@ object EventFilter {
* `null` does NOT work (passing `null` disables the
* source filter).''
*/
- def warning(message: String = null,
- source: String = null,
- start: String = "",
- pattern: String = null,
- occurrences: Int = Int.MaxValue): EventFilter =
- WarningFilter(Option(source),
- if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
- message ne null)(occurrences)
+ def warning(
+ message: String = null,
+ source: String = null,
+ start: String = "",
+ pattern: String = null,
+ occurrences: Int = Int.MaxValue): EventFilter =
+ WarningFilter(
+ Option(source),
+ if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
+ message ne null)(occurrences)
/**
* Create a filter for Info events. Give up to one of start and pattern:
@@ -236,14 +243,16 @@ object EventFilter {
* `null` does NOT work (passing `null` disables the
* source filter).''
*/
- def info(message: String = null,
- source: String = null,
- start: String = "",
- pattern: String = null,
- occurrences: Int = Int.MaxValue): EventFilter =
- InfoFilter(Option(source),
- if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
- message ne null)(occurrences)
+ def info(
+ message: String = null,
+ source: String = null,
+ start: String = "",
+ pattern: String = null,
+ occurrences: Int = Int.MaxValue): EventFilter =
+ InfoFilter(
+ Option(source),
+ if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
+ message ne null)(occurrences)
/**
* Create a filter for Debug events. Give up to one of start and pattern:
@@ -259,14 +268,16 @@ object EventFilter {
* `null` does NOT work (passing `null` disables the
* source filter).''
*/
- def debug(message: String = null,
- source: String = null,
- start: String = "",
- pattern: String = null,
- occurrences: Int = Int.MaxValue): EventFilter =
- DebugFilter(Option(source),
- if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
- message ne null)(occurrences)
+ def debug(
+ message: String = null,
+ source: String = null,
+ start: String = "",
+ pattern: String = null,
+ occurrences: Int = Int.MaxValue): EventFilter =
+ DebugFilter(
+ Option(source),
+ if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start),
+ message ne null)(occurrences)
/**
* Create a custom event filter. The filter will affect those events for
@@ -297,10 +308,11 @@ object EventFilter {
*
* If you want to match all Error events, the most efficient is to use Left("").
*/
-final case class ErrorFilter(throwable: Class[_],
- override val source: Option[String],
- override val message: Either[String, Regex],
- override val complete: Boolean)(occurrences: Int)
+final case class ErrorFilter(
+ throwable: Class[_],
+ override val source: Option[String],
+ override val message: Either[String, Regex],
+ override val complete: Boolean)(occurrences: Int)
extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -327,18 +339,20 @@ final case class ErrorFilter(throwable: Class[_],
* @param complete
* whether the event’s message must match the given message string or pattern completely
*/
- def this(throwable: Class[_],
- source: String,
- message: String,
- pattern: Boolean,
- complete: Boolean,
- occurrences: Int) =
- this(throwable,
- Option(source),
- if (message eq null) Left("")
- else if (pattern) Right(new Regex(message))
- else Left(message),
- complete)(occurrences)
+ def this(
+ throwable: Class[_],
+ source: String,
+ message: String,
+ pattern: Boolean,
+ complete: Boolean,
+ occurrences: Int) =
+ this(
+ throwable,
+ Option(source),
+ if (message eq null) Left("")
+ else if (pattern) Right(new Regex(message))
+ else Left(message),
+ complete)(occurrences)
/**
* Java API: filter only on the given type of exception
@@ -355,9 +369,10 @@ final case class ErrorFilter(throwable: Class[_],
*
* If you want to match all Warning events, the most efficient is to use Left("").
*/
-final case class WarningFilter(override val source: Option[String],
- override val message: Either[String, Regex],
- override val complete: Boolean)(occurrences: Int)
+final case class WarningFilter(
+ override val source: Option[String],
+ override val message: Either[String, Regex],
+ override val complete: Boolean)(occurrences: Int)
extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -383,11 +398,12 @@ final case class WarningFilter(override val source: Option[String],
* whether the event’s message must match the given message string or pattern completely
*/
def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) =
- this(Option(source),
- if (message eq null) Left("")
- else if (pattern) Right(new Regex(message))
- else Left(message),
- complete)(occurrences)
+ this(
+ Option(source),
+ if (message eq null) Left("")
+ else if (pattern) Right(new Regex(message))
+ else Left(message),
+ complete)(occurrences)
}
/**
@@ -398,9 +414,10 @@ final case class WarningFilter(override val source: Option[String],
*
* If you want to match all Info events, the most efficient is to use Left("").
*/
-final case class InfoFilter(override val source: Option[String],
- override val message: Either[String, Regex],
- override val complete: Boolean)(occurrences: Int)
+final case class InfoFilter(
+ override val source: Option[String],
+ override val message: Either[String, Regex],
+ override val complete: Boolean)(occurrences: Int)
extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -426,11 +443,12 @@ final case class InfoFilter(override val source: Option[String],
* whether the event’s message must match the given message string or pattern completely
*/
def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) =
- this(Option(source),
- if (message eq null) Left("")
- else if (pattern) Right(new Regex(message))
- else Left(message),
- complete)(occurrences)
+ this(
+ Option(source),
+ if (message eq null) Left("")
+ else if (pattern) Right(new Regex(message))
+ else Left(message),
+ complete)(occurrences)
}
/**
@@ -441,9 +459,10 @@ final case class InfoFilter(override val source: Option[String],
*
* If you want to match all Debug events, the most efficient is to use Left("").
*/
-final case class DebugFilter(override val source: Option[String],
- override val message: Either[String, Regex],
- override val complete: Boolean)(occurrences: Int)
+final case class DebugFilter(
+ override val source: Option[String],
+ override val message: Either[String, Regex],
+ override val complete: Boolean)(occurrences: Int)
extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -469,11 +488,12 @@ final case class DebugFilter(override val source: Option[String],
* whether the event’s message must match the given message string or pattern completely
*/
def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) =
- this(Option(source),
- if (message eq null) Left("")
- else if (pattern) Right(new Regex(message))
- else Left(message),
- complete)(occurrences)
+ this(
+ Option(source),
+ if (message eq null) Left("")
+ else if (pattern) Right(new Regex(message))
+ else Left(message),
+ complete)(occurrences)
}
/**
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala
index 4d0a1f255a..f886bbbaf0 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala
@@ -54,10 +54,11 @@ class TestFSMRef[S, D, T <: Actor](system: ActorSystem, props: Props, supervisor
* corresponding transition initiated from within the FSM, including timeout
* and stop handling.
*/
- def setState(stateName: S = fsm.stateName,
- stateData: D = fsm.stateData,
- timeout: FiniteDuration = null,
- stopReason: Option[FSM.Reason] = None): Unit = {
+ def setState(
+ stateName: S = fsm.stateName,
+ stateData: D = fsm.stateData,
+ timeout: FiniteDuration = null,
+ stopReason: Option[FSM.Reason] = None): Unit = {
fsm.applyState(FSM.State(stateName, stateData, Option(timeout), stopReason))
}
@@ -86,14 +87,15 @@ class TestFSMRef[S, D, T <: Actor](system: ActorSystem, props: Props, supervisor
object TestFSMRef {
- def apply[S, D, T <: Actor: ClassTag](factory: => T)(implicit ev: T <:< FSM[S, D],
- system: ActorSystem): TestFSMRef[S, D, T] = {
+ def apply[S, D, T <: Actor: ClassTag](
+ factory: => T)(implicit ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = {
val impl = system.asInstanceOf[ActorSystemImpl]
new TestFSMRef(impl, Props(factory), impl.guardian.asInstanceOf[InternalActorRef], TestActorRef.randomName)
}
- def apply[S, D, T <: Actor: ClassTag](factory: => T, name: String)(implicit ev: T <:< FSM[S, D],
- system: ActorSystem): TestFSMRef[S, D, T] = {
+ def apply[S, D, T <: Actor: ClassTag](factory: => T, name: String)(
+ implicit ev: T <:< FSM[S, D],
+ system: ActorSystem): TestFSMRef[S, D, T] = {
val impl = system.asInstanceOf[ActorSystemImpl]
new TestFSMRef(impl, Props(factory), impl.guardian.asInstanceOf[InternalActorRef], name)
}
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala
index 5b72aef4d0..e3e7dd8b45 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala
@@ -81,20 +81,22 @@ object TestActor {
delegates -= child
}
- override def processFailure(context: ActorContext,
- restart: Boolean,
- child: ActorRef,
- cause: Throwable,
- stats: ChildRestartStats,
- children: Iterable[ChildRestartStats]): Unit = {
+ override def processFailure(
+ context: ActorContext,
+ restart: Boolean,
+ child: ActorRef,
+ cause: Throwable,
+ stats: ChildRestartStats,
+ children: Iterable[ChildRestartStats]): Unit = {
delegate(child).processFailure(context, restart, child, cause, stats, children)
}
- override def handleFailure(context: ActorContext,
- child: ActorRef,
- cause: Throwable,
- stats: ChildRestartStats,
- children: Iterable[ChildRestartStats]): Boolean = {
+ override def handleFailure(
+ context: ActorContext,
+ child: ActorRef,
+ cause: Throwable,
+ stats: ChildRestartStats,
+ children: Iterable[ChildRestartStats]): Boolean = {
delegate(child).handleFailure(context, child, cause, stats, children)
}
}
@@ -176,8 +178,9 @@ trait TestKitBase {
*/
val testActor: ActorRef = {
val impl = system.asInstanceOf[ExtendedActorSystem]
- val ref = impl.systemActorOf(TestActor.props(queue).withDispatcher(CallingThreadDispatcher.Id),
- "%s-%d".format(testActorName, TestKit.testActorId.incrementAndGet))
+ val ref = impl.systemActorOf(
+ TestActor.props(queue).withDispatcher(CallingThreadDispatcher.Id),
+ "%s-%d".format(testActorName, TestKit.testActorId.incrementAndGet))
awaitCond(ref match {
case r: RepointableRef => r.isStarted
case _ => true
@@ -280,10 +283,11 @@ trait TestKitBase {
* Note that the timeout is scaled using Duration.dilated,
* which uses the configuration entry "akka.test.timefactor".
*/
- def awaitCond(p: => Boolean,
- max: Duration = Duration.Undefined,
- interval: Duration = 100.millis,
- message: String = ""): Unit = {
+ def awaitCond(
+ p: => Boolean,
+ max: Duration = Duration.Undefined,
+ interval: Duration = 100.millis,
+ message: String = ""): Unit = {
val _max = remainingOrDilated(max)
val stop = now + _max
@@ -591,13 +595,15 @@ trait TestKitBase {
*/
def expectMsgAllOf[T](max: FiniteDuration, obj: T*): immutable.Seq[T] = expectMsgAllOf_internal(max.dilated, obj: _*)
- private def checkMissingAndUnexpected(missing: Seq[Any],
- unexpected: Seq[Any],
- missingMessage: String,
- unexpectedMessage: String): Unit = {
- assert(missing.isEmpty && unexpected.isEmpty,
- (if (missing.isEmpty) "" else missing.mkString(missingMessage + " [", ", ", "] ")) +
- (if (unexpected.isEmpty) "" else unexpected.mkString(unexpectedMessage + " [", ", ", "]")))
+ private def checkMissingAndUnexpected(
+ missing: Seq[Any],
+ unexpected: Seq[Any],
+ missingMessage: String,
+ unexpectedMessage: String): Unit = {
+ assert(
+ missing.isEmpty && unexpected.isEmpty,
+ (if (missing.isEmpty) "" else missing.mkString(missingMessage + " [", ", ", "] ")) +
+ (if (unexpected.isEmpty) "" else unexpected.mkString(unexpectedMessage + " [", ", ", "]")))
}
private def expectMsgAllOf_internal[T](max: FiniteDuration, obj: T*): immutable.Seq[T] = {
@@ -822,9 +828,10 @@ trait TestKitBase {
*
* If verifySystemShutdown is true, then an exception will be thrown on failure.
*/
- def shutdown(actorSystem: ActorSystem = system,
- duration: Duration = 10.seconds.dilated.min(10.seconds),
- verifySystemShutdown: Boolean = false): Unit = {
+ def shutdown(
+ actorSystem: ActorSystem = system,
+ duration: Duration = 10.seconds.dilated.min(10.seconds),
+ verifySystemShutdown: Boolean = false): Unit = {
TestKit.shutdownActorSystem(actorSystem, duration, verifySystemShutdown)
}
@@ -954,16 +961,18 @@ object TestKit {
*
* If verifySystemShutdown is true, then an exception will be thrown on failure.
*/
- def shutdownActorSystem(actorSystem: ActorSystem,
- duration: Duration = 10.seconds,
- verifySystemShutdown: Boolean = false): Unit = {
+ def shutdownActorSystem(
+ actorSystem: ActorSystem,
+ duration: Duration = 10.seconds,
+ verifySystemShutdown: Boolean = false): Unit = {
actorSystem.terminate()
try Await.ready(actorSystem.whenTerminated, duration)
catch {
case _: TimeoutException =>
- val msg = "Failed to stop [%s] within [%s] \n%s".format(actorSystem.name,
- duration,
- actorSystem.asInstanceOf[ActorSystemImpl].printTree)
+ val msg = "Failed to stop [%s] within [%s] \n%s".format(
+ actorSystem.name,
+ duration,
+ actorSystem.asInstanceOf[ActorSystemImpl].printTree)
if (verifySystemShutdown) throw new RuntimeException(msg)
else println(msg)
}
diff --git a/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala b/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala
index 904f03dffd..8c95d4212b 100644
--- a/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala
@@ -12,8 +12,9 @@ import akka.testkit.{ DebugFilter, ErrorFilter, InfoFilter, WarningFilter }
class EventFilter(clazz: Class[_], system: ActorSystem) {
- require(classOf[Throwable].isAssignableFrom(clazz) || classOf[Logging.LogEvent].isAssignableFrom(clazz),
- "supplied class must either be LogEvent or Throwable")
+ require(
+ classOf[Throwable].isAssignableFrom(clazz) || classOf[Logging.LogEvent].isAssignableFrom(clazz),
+ "supplied class must either be LogEvent or Throwable")
private val _clazz: Class[_ <: Logging.LogEvent] =
if (classOf[Throwable].isAssignableFrom(clazz))
diff --git a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala
index 5d0a6def7b..57fb507d4e 100644
--- a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala
@@ -791,10 +791,11 @@ class TestKit(system: ActorSystem) {
* certain characteristics are generated at a certain rate:
*
*/
- def receiveWhile[T](max: java.time.Duration,
- idle: java.time.Duration,
- messages: Int,
- f: JFunction[AnyRef, T]): JList[T] = {
+ def receiveWhile[T](
+ max: java.time.Duration,
+ idle: java.time.Duration,
+ messages: Int,
+ f: JFunction[AnyRef, T]): JList[T] = {
tp.receiveWhile(max.asScala, idle.asScala, messages)(new CachingPartialFunction[AnyRef, T] {
@throws(classOf[Exception])
override def `match`(x: AnyRef): T = f.apply(x)
diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala
index 88e2d418ed..678f8f55ab 100644
--- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala
@@ -34,10 +34,11 @@ class AkkaSpecSpec extends WordSpec with Matchers {
"terminate all actors" in {
// verbose config just for demonstration purposes, please leave in in case of debugging
import scala.collection.JavaConverters._
- val conf = Map("akka.actor.debug.lifecycle" -> true,
- "akka.actor.debug.event-stream" -> true,
- "akka.loglevel" -> "DEBUG",
- "akka.stdout-loglevel" -> "DEBUG")
+ val conf = Map(
+ "akka.actor.debug.lifecycle" -> true,
+ "akka.actor.debug.event-stream" -> true,
+ "akka.loglevel" -> "DEBUG",
+ "akka.stdout-loglevel" -> "DEBUG")
val system = ActorSystem("AkkaSpec1", ConfigFactory.parseMap(conf.asJava).withFallback(AkkaSpec.testConf))
var refs = Seq.empty[ActorRef]
val spec = new AkkaSpec(system) { refs = Seq(testActor, system.actorOf(Props.empty, "name")) }
diff --git a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala
index 1647984054..5ddd2dd9c4 100644
--- a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala
@@ -79,11 +79,12 @@ object Coroner {
* If displayThreadCounts is set to true, then the Coroner will print thread counts during start
* and stop.
*/
- def watch(duration: FiniteDuration,
- reportTitle: String,
- out: PrintStream,
- startAndStopDuration: FiniteDuration = defaultStartAndStopDuration,
- displayThreadCounts: Boolean = false): WatchHandle = {
+ def watch(
+ duration: FiniteDuration,
+ reportTitle: String,
+ out: PrintStream,
+ startAndStopDuration: FiniteDuration = defaultStartAndStopDuration,
+ displayThreadCounts: Boolean = false): WatchHandle = {
val watchedHandle = new WatchHandleImpl(startAndStopDuration)
@@ -256,11 +257,12 @@ trait WatchedByCoroner {
@volatile private var coronerWatch: Coroner.WatchHandle = _
final def startCoroner(): Unit = {
- coronerWatch = Coroner.watch(expectedTestDuration.dilated,
- getClass.getName,
- System.err,
- startAndStopDuration.dilated,
- displayThreadCounts)
+ coronerWatch = Coroner.watch(
+ expectedTestDuration.dilated,
+ getClass.getName,
+ System.err,
+ startAndStopDuration.dilated,
+ displayThreadCounts)
}
final def stopCoroner(): Unit = {
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
index 3b1c6030ff..eb83123026 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
@@ -16,9 +16,10 @@ import org.{ HdrHistogram => hdr }
* maintain value resolution and separation. Must be a non-negative
* integer between 0 and 5.
*/
-private[akka] class HdrHistogram(highestTrackableValue: Long,
- numberOfSignificantValueDigits: Int,
- val unit: String = "")
+private[akka] class HdrHistogram(
+ highestTrackableValue: Long,
+ numberOfSignificantValueDigits: Int,
+ val unit: String = "")
extends Metric {
private val hist = new hdr.Histogram(highestTrackableValue, numberOfSignificantValueDigits)
@@ -37,11 +38,13 @@ private[akka] class HdrHistogram(highestTrackableValue: Long,
}
}
- private def wrapHistogramOutOfBoundsException(value: Long,
- ex: ArrayIndexOutOfBoundsException): IllegalArgumentException =
- new IllegalArgumentException(s"Given value $value can not be stored in this histogram " +
- s"(min: ${hist.getLowestDiscernibleValue}, max: ${hist.getHighestTrackableValue}})",
- ex)
+ private def wrapHistogramOutOfBoundsException(
+ value: Long,
+ ex: ArrayIndexOutOfBoundsException): IllegalArgumentException =
+ new IllegalArgumentException(
+ s"Given value $value can not be stored in this histogram " +
+ s"(min: ${hist.getLowestDiscernibleValue}, max: ${hist.getHighestTrackableValue})",
+ ex)
def getData = hist.copy()
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala
index 3059309f30..1e545e7ee1 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala
@@ -14,28 +14,31 @@ private[akka] trait MemoryUsageSnapshotting extends MetricsPrefix {
def getHeapSnapshot = {
val metrics = getMetrics
- HeapMemoryUsage(metrics.get(key("heap-init")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("heap-used")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("heap-max")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("heap-committed")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("heap-usage")).asInstanceOf[RatioGauge].getValue)
+ HeapMemoryUsage(
+ metrics.get(key("heap-init")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("heap-used")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("heap-max")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("heap-committed")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("heap-usage")).asInstanceOf[RatioGauge].getValue)
}
def getTotalSnapshot = {
val metrics = getMetrics
- TotalMemoryUsage(metrics.get(key("total-init")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("total-used")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("total-max")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("total-committed")).asInstanceOf[Gauge[Long]].getValue)
+ TotalMemoryUsage(
+ metrics.get(key("total-init")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("total-used")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("total-max")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("total-committed")).asInstanceOf[Gauge[Long]].getValue)
}
def getNonHeapSnapshot = {
val metrics = getMetrics
- NonHeapMemoryUsage(metrics.get(key("non-heap-init")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("non-heap-used")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("non-heap-max")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("non-heap-committed")).asInstanceOf[Gauge[Long]].getValue,
- metrics.get(key("non-heap-usage")).asInstanceOf[RatioGauge].getValue)
+ NonHeapMemoryUsage(
+ metrics.get(key("non-heap-init")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("non-heap-used")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("non-heap-max")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("non-heap-committed")).asInstanceOf[Gauge[Long]].getValue,
+ metrics.get(key("non-heap-usage")).asInstanceOf[RatioGauge].getValue)
}
private def key(k: String) = prefix + "." + k
@@ -45,29 +48,32 @@ private[akka] trait MemoryUsageSnapshotting extends MetricsPrefix {
private[akka] case class TotalMemoryUsage(init: Long, used: Long, max: Long, committed: Long) {
def diff(other: TotalMemoryUsage): TotalMemoryUsage =
- TotalMemoryUsage(this.init - other.init,
- this.used - other.used,
- this.max - other.max,
- this.committed - other.committed)
+ TotalMemoryUsage(
+ this.init - other.init,
+ this.used - other.used,
+ this.max - other.max,
+ this.committed - other.committed)
}
private[akka] case class HeapMemoryUsage(init: Long, used: Long, max: Long, committed: Long, usage: Double) {
def diff(other: HeapMemoryUsage): HeapMemoryUsage =
- HeapMemoryUsage(this.init - other.init,
- this.used - other.used,
- this.max - other.max,
- this.committed - other.committed,
- this.usage - other.usage)
+ HeapMemoryUsage(
+ this.init - other.init,
+ this.used - other.used,
+ this.max - other.max,
+ this.committed - other.committed,
+ this.usage - other.usage)
}
private[akka] case class NonHeapMemoryUsage(init: Long, used: Long, max: Long, committed: Long, usage: Double) {
def diff(other: NonHeapMemoryUsage): NonHeapMemoryUsage =
- NonHeapMemoryUsage(this.init - other.init,
- this.used - other.used,
- this.max - other.max,
- this.committed - other.committed,
- this.usage - other.usage)
+ NonHeapMemoryUsage(
+ this.init - other.init,
+ this.used - other.used,
+ this.max - other.max,
+ this.committed - other.committed,
+ this.usage - other.usage)
}
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
index 5aa6fdde30..45d51e14c8 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
@@ -44,12 +44,14 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL {
*
* @param unitString just for human readable output, during console printing
*/
- def hdrHistogram(key: MetricKey,
- highestTrackableValue: Long,
- numberOfSignificantValueDigits: Int,
- unitString: String = ""): HdrHistogram =
- getOrRegister((key / "hdr-histogram").toString,
- new HdrHistogram(highestTrackableValue, numberOfSignificantValueDigits, unitString))
+ def hdrHistogram(
+ key: MetricKey,
+ highestTrackableValue: Long,
+ numberOfSignificantValueDigits: Int,
+ unitString: String = ""): HdrHistogram =
+ getOrRegister(
+ (key / "hdr-histogram").toString,
+ new HdrHistogram(highestTrackableValue, numberOfSignificantValueDigits, unitString))
/**
* Use when measuring for 9x'th percentiles as well as min / max / mean values.
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
index 777a9e07ee..ef43099e9a 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
@@ -15,19 +15,21 @@ import scala.reflect.ClassTag
* Used to report `akka.testkit.metric.Metric` types that the original `com.codahale.metrics.ConsoleReporter` is unaware of (cannot re-use directly because of private constructor).
*/
class AkkaConsoleReporter(registry: AkkaMetricRegistry, verbose: Boolean, output: PrintStream = System.out)
- extends ScheduledReporter(registry.asInstanceOf[MetricRegistry],
- "akka-console-reporter",
- MetricFilter.ALL,
- TimeUnit.SECONDS,
- TimeUnit.NANOSECONDS) {
+ extends ScheduledReporter(
+ registry.asInstanceOf[MetricRegistry],
+ "akka-console-reporter",
+ MetricFilter.ALL,
+ TimeUnit.SECONDS,
+ TimeUnit.NANOSECONDS) {
private final val ConsoleWidth = 80
- override def report(gauges: util.SortedMap[String, Gauge[_]],
- counters: util.SortedMap[String, Counter],
- histograms: util.SortedMap[String, Histogram],
- meters: util.SortedMap[String, Meter],
- timers: util.SortedMap[String, Timer]): Unit = {
+ override def report(
+ gauges: util.SortedMap[String, Gauge[_]],
+ counters: util.SortedMap[String, Counter],
+ histograms: util.SortedMap[String, Histogram],
+ meters: util.SortedMap[String, Meter],
+ timers: util.SortedMap[String, Timer]): Unit = {
import collection.JavaConverters._
// default Metrics types