diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
index e3704ca54f..fbfb9bee6f 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
@@ -101,7 +101,8 @@ class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") w
"not invoke preRestart and postRestart when never restarted using OneForOneStrategy" in {
val id = newUuid().toString
- val supervisor = system.actorOf(Props(classOf[Supervisor],
+ val supervisor = system.actorOf(Props(
+ classOf[Supervisor],
OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception]))))
val gen = new AtomicInteger(0)
val props = Props(classOf[LifeCycleTestActor], testActor, id, gen)
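
Note: the hunk above is purely a layout change; this patch consistently moves the first argument of a multi-line call onto its own line. A minimal sketch of the two equivalent Props layouts (DemoSupervisor is a hypothetical stand-in for the spec's Supervisor):

  import akka.actor.{ Actor, OneForOneStrategy, Props }

  object PropsLayoutDemo {
    // hypothetical actor taking a strategy, standing in for the spec's Supervisor
    class DemoSupervisor(strategy: OneForOneStrategy) extends Actor {
      override val supervisorStrategy = strategy
      def receive = { case _ ⇒ }
    }

    val strategy = OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception]))

    // before: first argument shares the opening line
    val before = Props(classOf[DemoSupervisor], strategy)

    // after: every argument on its own line, as this patch enforces
    val after = Props(
      classOf[DemoSupervisor],
      strategy)
  }
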
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala
index a3edc5468c..be1e537eee 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala
@@ -249,14 +249,14 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout {
val lookname = looker.path.elements.mkString("", "/", "/")
for (
(l, r) ← Seq(
- LookupString("a/b/c") -> empty(lookname + "a/b/c"),
- LookupString("") -> system.deadLetters,
- LookupString("akka://all-systems/Nobody") -> system.deadLetters,
- LookupPath(system / "hallo") -> empty("user/hallo"),
- LookupPath(looker.path child "hallo") -> empty(lookname + "hallo"), // test Java API
- LookupPath(looker.path descendant Seq("a", "b").asJava) -> empty(lookname + "a/b"), // test Java API
- LookupElems(Seq()) -> system.deadLetters,
- LookupElems(Seq("a")) -> empty(lookname + "a"))
+ LookupString("a/b/c") → empty(lookname + "a/b/c"),
+ LookupString("") → system.deadLetters,
+ LookupString("akka://all-systems/Nobody") → system.deadLetters,
+ LookupPath(system / "hallo") → empty("user/hallo"),
+ LookupPath(looker.path child "hallo") → empty(lookname + "hallo"), // test Java API
+ LookupPath(looker.path descendant Seq("a", "b").asJava) → empty(lookname + "a/b"), // test Java API
+ LookupElems(Seq()) → system.deadLetters,
+ LookupElems(Seq("a")) → empty(lookname + "a"))
) checkOne(looker, l, r)
}
for (looker ← all) check(looker)
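
Note: the `->` to `→` substitutions here (and throughout the patch) lean on scala.Predef.ArrowAssoc, which in the Scala versions this patch targets (2.11/2.12) defines the Unicode arrow as a plain alias of `->`; both simply build a Tuple2. A minimal sketch:

  object ArrowDemo extends App {
    val ascii = "user/hallo" -> 1 // desugars to ArrowAssoc("user/hallo").->(1)
    val utf = "user/hallo" → 1    // same method under its Unicode alias
    assert(ascii == utf && ascii == (("user/hallo", 1))) // both are Tuple2 values
  }
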
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala
index 6d4f2a7d3f..34a7cbccaa 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala
@@ -210,7 +210,8 @@ object ActorMailboxSpec {
final case class MCBoundedMailbox(val capacity: Int, val pushTimeOut: FiniteDuration)
extends MailboxType with ProducesMessageQueue[MCBoundedMessageQueueSemantics] {
- def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
+ def this(settings: ActorSystem.Settings, config: Config) = this(
+ config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
@@ -241,23 +242,29 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded deque message queue when it is only configured on the props" in {
- checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+ checkMailboxQueue(
+ Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
"default-override-from-props", UnboundedDeqMailboxTypes)
}
"get an bounded message queue when it's only configured with RequiresMailbox" in {
- checkMailboxQueue(Props[BoundedQueueReportingActor],
+ checkMailboxQueue(
+ Props[BoundedQueueReportingActor],
"default-override-from-trait", BoundedMailboxTypes)
}
"get an unbounded deque message queue when it's only mixed with Stash" in {
- checkMailboxQueue(Props[StashQueueReportingActor],
+ checkMailboxQueue(
+ Props[StashQueueReportingActor],
"default-override-from-stash", UnboundedDeqMailboxTypes)
- checkMailboxQueue(Props(new StashQueueReportingActor),
+ checkMailboxQueue(
+ Props(new StashQueueReportingActor),
"default-override-from-stash2", UnboundedDeqMailboxTypes)
- checkMailboxQueue(Props(classOf[StashQueueReportingActorWithParams], 17, "hello"),
+ checkMailboxQueue(
+ Props(classOf[StashQueueReportingActorWithParams], 17, "hello"),
"default-override-from-stash3", UnboundedDeqMailboxTypes)
- checkMailboxQueue(Props(new StashQueueReportingActorWithParams(17, "hello")),
+ checkMailboxQueue(
+ Props(new StashQueueReportingActorWithParams(17, "hello")),
"default-override-from-stash4", UnboundedDeqMailboxTypes)
}
@@ -278,12 +285,14 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an bounded control aware message queue when it's only configured with RequiresMailbox" in {
- checkMailboxQueue(Props[BoundedControlAwareQueueReportingActor],
+ checkMailboxQueue(
+ Props[BoundedControlAwareQueueReportingActor],
"default-override-from-trait-bounded-control-aware", BoundedControlAwareMailboxTypes)
}
"get an unbounded control aware message queue when it's only configured with RequiresMailbox" in {
- checkMailboxQueue(Props[UnboundedControlAwareQueueReportingActor],
+ checkMailboxQueue(
+ Props[UnboundedControlAwareQueueReportingActor],
"default-override-from-trait-unbounded-control-aware", UnboundedControlAwareMailboxTypes)
}
@@ -317,7 +326,8 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded message queue overriding configuration on the props" in {
- checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+ checkMailboxQueue(
+ Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
"bounded-unbounded-override-props", UnboundedMailboxTypes)
}
@@ -401,17 +411,20 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded message queue with a balancing dispatcher" in {
- checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-dispatcher"),
+ checkMailboxQueue(
+ Props[QueueReportingActor].withDispatcher("balancing-dispatcher"),
"unbounded-balancing", UnboundedMailboxTypes)
}
"get a bounded message queue with a balancing bounded dispatcher" in {
- checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"),
+ checkMailboxQueue(
+ Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"),
"bounded-balancing", BoundedMailboxTypes)
}
"get a bounded message queue with a requiring balancing bounded dispatcher" in {
- checkMailboxQueue(Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"),
+ checkMailboxQueue(
+ Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"),
"requiring-bounded-balancing", BoundedMailboxTypes)
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
index 04a2ee9702..f6f2fdbada 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
@@ -65,7 +65,8 @@ class ActorSelectionSpec extends AkkaSpec("akka.loglevel=DEBUG") with DefaultTim
asked.correlationId should ===(selection)
implicit val ec = system.dispatcher
- val resolved = Await.result(selection.resolveOne(timeout.duration).mapTo[ActorRef] recover { case _ ⇒ null },
+ val resolved = Await.result(
+ selection.resolveOne(timeout.duration).mapTo[ActorRef] recover { case _ ⇒ null },
timeout.duration)
Option(resolved) should ===(result)
@@ -248,11 +249,11 @@ class ActorSelectionSpec extends AkkaSpec("akka.loglevel=DEBUG") with DefaultTim
val lookname = looker.path.elements.mkString("", "/", "/")
for (
(l, r) ← Seq(
- SelectString("a/b/c") -> None,
- SelectString("akka://all-systems/Nobody") -> None,
- SelectPath(system / "hallo") -> None,
- SelectPath(looker.path child "hallo") -> None, // test Java API
- SelectPath(looker.path descendant Seq("a", "b").asJava) -> None) // test Java API
+ SelectString("a/b/c") → None,
+ SelectString("akka://all-systems/Nobody") → None,
+ SelectPath(system / "hallo") → None,
+ SelectPath(looker.path child "hallo") → None, // test Java API
+ SelectPath(looker.path descendant Seq("a", "b").asJava) → None) // test Java API
) checkOne(looker, l, r)
}
for (looker ← all) check(looker)
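
Note: for context on the reflowed `resolveOne` call above: it resolves the selection to a single ActorRef and fails the future when nothing matches; the spec recovers a failure to `null` so `Option(...)` can turn it into `None`. A sketch of the same pattern, assuming a timeout and execution context in scope:

  import akka.actor.{ ActorRef, ActorSelection }
  import akka.util.Timeout
  import scala.concurrent.{ Await, ExecutionContext, Future }

  // failure (no such actor, timeout) becomes null, then None via Option(...)
  def resolveOrNone(selection: ActorSelection)(
    implicit timeout: Timeout, ec: ExecutionContext): Option[ActorRef] = {
    val resolved: Future[ActorRef] =
      selection.resolveOne(timeout.duration).recover { case _ ⇒ null }
    Option(Await.result(resolved, timeout.duration))
  }
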
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
index 933dd207e6..fa592e5e89 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
@@ -273,7 +273,8 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
}
"allow configuration of guardian supervisor strategy" in {
- implicit val system = ActorSystem("Stop",
+ implicit val system = ActorSystem(
+ "Stop",
ConfigFactory.parseString("akka.actor.guardian-supervisor-strategy=akka.actor.StoppingSupervisorStrategy")
.withFallback(AkkaSpec.testConf))
val a = system.actorOf(Props(new Actor {
@@ -293,7 +294,8 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
}
"shut down when /user escalates" in {
- implicit val system = ActorSystem("Stop",
+ implicit val system = ActorSystem(
+ "Stop",
ConfigFactory.parseString("akka.actor.guardian-supervisor-strategy=\"akka.actor.ActorSystemSpec$Strategy\"")
.withFallback(AkkaSpec.testConf))
val a = system.actorOf(Props(new Actor {
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala
index 83f13faac5..ad34e3678e 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala
@@ -8,12 +8,11 @@ import java.util.concurrent.atomic.AtomicInteger
import akka.testkit.EventFilter
import akka.testkit.TestKit._
import com.typesafe.config.ConfigFactory
-import org.scalatest.{Matchers, WordSpec}
+import org.scalatest.{ Matchers, WordSpec }
import org.scalatest.junit.JUnitSuiteLike
import scala.util.control.NoStackTrace
-
class JavaExtensionSpec extends JavaExtension with JUnitSuiteLike
object TestExtension extends ExtensionId[TestExtension] with ExtensionIdProvider {
@@ -52,7 +51,6 @@ class FailingTestExtension(val system: ExtendedActorSystem) extends Extension {
throw new FailingTestExtension.TestException
}
-
class ExtensionSpec extends WordSpec with Matchers {
"The ActorSystem extensions support" should {
@@ -83,9 +81,8 @@ class ExtensionSpec extends WordSpec with Matchers {
shutdownActorSystem(system)
}
-
"fail the actor system if an extension listed in akka.extensions fails to start" in {
- intercept[RuntimeException]{
+ intercept[RuntimeException] {
val system = ActorSystem("failing", ConfigFactory.parseString(
"""
akka.extensions = ["akka.actor.FailingTestExtension"]
@@ -134,7 +131,6 @@ class ExtensionSpec extends WordSpec with Matchers {
}
}
-
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
index 15e975572e..d97da96b9f 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
@@ -34,6 +34,7 @@ object FSMActorSpec {
class Lock(code: String, timeout: FiniteDuration, latches: Latches) extends Actor with FSM[LockState, CodeState] {
import latches._
+ import FSM.`→`
startWith(Locked, CodeState("", code))
@@ -71,7 +72,7 @@ object FSMActorSpec {
}
onTransition {
- case Locked -> Open ⇒ transitionLatch.open
+ case Locked → Open ⇒ transitionLatch.open
}
// verify that old-style does still compile
@@ -98,8 +99,9 @@ object FSMActorSpec {
final case class CodeState(soFar: String, code: String)
}
-class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with ImplicitSender {
+class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" → true)) with ImplicitSender {
import FSMActorSpec._
+ import FSM.`→`
val timeout = Timeout(2 seconds)
@@ -222,7 +224,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
case Event("stop", _) ⇒ stop()
}
onTransition {
- case "not-started" -> "started" ⇒
+ case "not-started" → "started" ⇒
for (timerName ← timerNames) setTimer(timerName, (), 10 seconds, false)
}
onTermination {
@@ -250,8 +252,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
"log events and transitions if asked to do so" in {
import scala.collection.JavaConverters._
- val config = ConfigFactory.parseMap(Map("akka.loglevel" -> "DEBUG", "akka.actor.serialize-messages" -> "off",
- "akka.actor.debug.fsm" -> true).asJava).withFallback(system.settings.config)
+ val config = ConfigFactory.parseMap(Map("akka.loglevel" → "DEBUG", "akka.actor.serialize-messages" → "off",
+ "akka.actor.debug.fsm" → true).asJava).withFallback(system.settings.config)
val fsmEventSystem = ActorSystem("fsmEvent", config)
try {
new TestKit(fsmEventSystem) {
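
Note: the added `import FSM.`→`` lines are what let `onTransition` blocks match with the Unicode arrow: `onTransition` receives the (from, to) state pair, and FSM's companion exposes `->` (now joined by `→`) as a pair extractor. A standalone sketch of how such an extractor works, independent of Akka:

  // a pair extractor analogous to FSM.`->` / FSM.`→`
  object → {
    def unapply[A, B](pair: (A, B)): Option[(A, B)] = Some(pair)
  }

  object TransitionMatchDemo extends App {
    val transition = ("Locked", "Open")
    transition match {
      // the infix pattern `from → to` calls →.unapply on the pair
      case from → to ⇒ println(s"$from to $to")
    }
  }
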
diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala
index 0e45a1e3e5..1d405e6107 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala
@@ -129,7 +129,8 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
}
"notify unhandled messages" taggedAs TimingTest in {
- filterEvents(EventFilter.warning("unhandled event Tick in state TestUnhandled", source = fsm.path.toString, occurrences = 1),
+ filterEvents(
+ EventFilter.warning("unhandled event Tick in state TestUnhandled", source = fsm.path.toString, occurrences = 1),
EventFilter.warning("unhandled event Unhandled(test) in state TestUnhandled", source = fsm.path.toString, occurrences = 1)) {
fsm ! TestUnhandled
within(3 second) {
@@ -208,7 +209,7 @@ object FSMTimingSpec {
goto(Initial)
}
onTransition {
- case Initial -> TestSingleTimerResubmit ⇒ setTimer("blah", Tick, 500.millis.dilated)
+ case Initial → TestSingleTimerResubmit ⇒ setTimer("blah", Tick, 500.millis.dilated)
}
when(TestSingleTimerResubmit) {
case Event(Tick, _) ⇒
diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala
index c150e68370..6096e8202f 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala
@@ -9,6 +9,7 @@ import scala.concurrent.duration._
import scala.language.postfixOps
object FSMTransitionSpec {
+ import FSM.`→`
class Supervisor extends Actor {
def receive = { case _ ⇒ }
@@ -20,7 +21,7 @@ object FSMTransitionSpec {
case Event("stay", _) ⇒ stay()
case Event(_, _) ⇒ goto(0)
}
- onTransition { case from -> to ⇒ target ! (from -> to) }
+ onTransition { case from → to ⇒ target ! (from → to) }
initialize()
}
@@ -50,8 +51,8 @@ object FSMTransitionSpec {
case _ ⇒ goto(1)
}
onTransition {
- case 0 -> 1 ⇒ target ! ((stateData, nextStateData))
- case 1 -> 1 ⇒ target ! ((stateData, nextStateData))
+ case 0 → 1 ⇒ target ! ((stateData, nextStateData))
+ case 1 → 1 ⇒ target ! ((stateData, nextStateData))
}
}
@@ -64,16 +65,17 @@ object FSMTransitionSpec {
class FSMTransitionSpec extends AkkaSpec with ImplicitSender {
import FSMTransitionSpec._
+ import FSM.`→`
"A FSM transition notifier" must {
"not trigger onTransition for stay" in {
val fsm = system.actorOf(Props(new SendAnyTransitionFSM(testActor)))
- expectMsg(0 -> 0) // caused by initialize(), OK.
+ expectMsg(0 → 0) // caused by initialize(), OK.
fsm ! "stay" // no transition event
expectNoMsg(500.millis)
fsm ! "goto" // goto(current state)
- expectMsg(0 -> 0)
+ expectMsg(0 → 0)
}
"notify listeners" in {
@@ -150,7 +152,7 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender {
case Event("switch", _) ⇒ goto(1) using sender()
}
onTransition {
- case x -> y ⇒ nextStateData ! (x -> y)
+ case x → y ⇒ nextStateData ! (x → y)
}
when(1) {
case Event("test", _) ⇒
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
index 023878e452..65586df6e2 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
@@ -26,6 +26,8 @@ import java.lang.System.identityHashCode
import akka.util.Helpers.ConfigOps
object SupervisorHierarchySpec {
+ import FSM.`→`
+
class FireWorkerException(msg: String) extends Exception(msg)
/**
@@ -79,7 +81,8 @@ object SupervisorHierarchySpec {
extends DispatcherConfigurator(config, prerequisites) {
private val instance: MessageDispatcher =
- new Dispatcher(this,
+ new Dispatcher(
+ this,
config.getString("id"),
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),
@@ -467,7 +470,7 @@ object SupervisorHierarchySpec {
}
onTransition {
- case Init -> Stress ⇒
+ case Init → Stress ⇒
self ! Work
idleChildren = children
activeChildren = children
@@ -532,7 +535,7 @@ object SupervisorHierarchySpec {
}
onTransition {
- case Stress -> Finishing ⇒ ignoreFailConstr = true
+ case Stress → Finishing ⇒ ignoreFailConstr = true
}
when(Finishing) {
@@ -546,7 +549,7 @@ object SupervisorHierarchySpec {
}
onTransition {
- case _ -> LastPing ⇒
+ case _ → LastPing ⇒
idleChildren foreach (_ ! "ping")
pingChildren ++= idleChildren
idleChildren = Vector.empty
@@ -563,7 +566,7 @@ object SupervisorHierarchySpec {
}
onTransition {
- case _ -> Stopping ⇒
+ case _ → Stopping ⇒
ignoreNotResumedLogs = false
hierarchy ! PingOfDeath
}
@@ -596,7 +599,7 @@ object SupervisorHierarchySpec {
stop
}
case Event(StateTimeout, _) ⇒
- errors :+= self -> ErrorLog("timeout while Stopping", Vector.empty)
+ errors :+= self → ErrorLog("timeout while Stopping", Vector.empty)
println(system.asInstanceOf[ActorSystemImpl].printTree)
getErrors(hierarchy, 10)
printErrors()
@@ -604,7 +607,7 @@ object SupervisorHierarchySpec {
testActor ! "timeout in Stopping"
stop
case Event(e: ErrorLog, _) ⇒
- errors :+= sender() -> e
+ errors :+= sender() → e
goto(Failed)
}
@@ -630,7 +633,7 @@ object SupervisorHierarchySpec {
when(Failed, stateTimeout = 5.seconds.dilated) {
case Event(e: ErrorLog, _) ⇒
if (!e.msg.startsWith("not resumed") || !ignoreNotResumedLogs)
- errors :+= sender() -> e
+ errors :+= sender() → e
stay
case Event(Terminated(r), _) if r == hierarchy ⇒
printErrors()
@@ -650,8 +653,8 @@ object SupervisorHierarchySpec {
target match {
case l: LocalActorRef ⇒
l.underlying.actor match {
- case h: Hierarchy ⇒ errors :+= target -> ErrorLog("forced", h.log)
- case _ ⇒ errors :+= target -> ErrorLog("fetched", stateCache.get(target.path).log)
+ case h: Hierarchy ⇒ errors :+= target → ErrorLog("forced", h.log)
+ case _ ⇒ errors :+= target → ErrorLog("fetched", stateCache.get(target.path).log)
}
if (depth > 0) {
l.underlying.children foreach (getErrors(_, depth - 1))
@@ -663,8 +666,8 @@ object SupervisorHierarchySpec {
target match {
case l: LocalActorRef ⇒
l.underlying.actor match {
- case h: Hierarchy ⇒ errors :+= target -> ErrorLog("forced", h.log)
- case _ ⇒ errors :+= target -> ErrorLog("fetched", stateCache.get(target.path).log)
+ case h: Hierarchy ⇒ errors :+= target → ErrorLog("forced", h.log)
+ case _ ⇒ errors :+= target → ErrorLog("fetched", stateCache.get(target.path).log)
}
if (target != hierarchy) getErrorsUp(l.getParent)
}
@@ -693,7 +696,7 @@ object SupervisorHierarchySpec {
case Event(e: ErrorLog, _) ⇒
if (e.msg.startsWith("not resumed")) stay
else {
- errors :+= sender() -> e
+ errors :+= sender() → e
// don’t stop the hierarchy, that is going to happen all by itself and in the right order
goto(Failed)
}
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala
index dc02187aa3..0efc29a02d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala
@@ -58,7 +58,7 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul
countDownLatch.await(10, TimeUnit.SECONDS)
- Seq("actor1" -> actor1, "actor2" -> actor2, "actor3" -> actor3, "actor4" -> actor4) map {
+ Seq("actor1" → actor1, "actor2" → actor2, "actor3" → actor3, "actor4" → actor4) map {
case (id, ref) ⇒ (id, ref ? "status")
} foreach {
case (id, f) ⇒ (id, Await.result(f, timeout.duration)) should ===((id, "OK"))
diff --git a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
index a461b7d520..013fe2cd21 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
@@ -16,9 +16,10 @@ object UidClashTest {
@volatile var oldActor: ActorRef = _
- private[akka] class EvilCollidingActorRef(override val provider: ActorRefProvider,
- override val path: ActorPath,
- val eventStream: EventStream) extends MinimalActorRef {
+ private[akka] class EvilCollidingActorRef(
+ override val provider: ActorRefProvider,
+ override val path: ActorPath,
+ val eventStream: EventStream) extends MinimalActorRef {
//Ignore everything
override def isTerminated: Boolean = true
diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
index e22443d0f0..5d4fd68501 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
@@ -181,13 +181,13 @@ object ActorModelSpec {
dispatcher.asInstanceOf[MessageDispatcherInterceptor].getStats(actorRef)
def assertRefDefaultZero(actorRef: ActorRef, dispatcher: MessageDispatcher = null)(
- suspensions: Long = 0,
- resumes: Long = 0,
- registers: Long = 0,
- unregisters: Long = 0,
- msgsReceived: Long = 0,
+ suspensions: Long = 0,
+ resumes: Long = 0,
+ registers: Long = 0,
+ unregisters: Long = 0,
+ msgsReceived: Long = 0,
msgsProcessed: Long = 0,
- restarts: Long = 0)(implicit system: ActorSystem) {
+ restarts: Long = 0)(implicit system: ActorSystem) {
assertRef(actorRef, dispatcher)(
suspensions,
resumes,
@@ -199,13 +199,13 @@ object ActorModelSpec {
}
def assertRef(actorRef: ActorRef, dispatcher: MessageDispatcher = null)(
- suspensions: Long = statsFor(actorRef, dispatcher).suspensions.get(),
- resumes: Long = statsFor(actorRef, dispatcher).resumes.get(),
- registers: Long = statsFor(actorRef, dispatcher).registers.get(),
- unregisters: Long = statsFor(actorRef, dispatcher).unregisters.get(),
- msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(),
+ suspensions: Long = statsFor(actorRef, dispatcher).suspensions.get(),
+ resumes: Long = statsFor(actorRef, dispatcher).resumes.get(),
+ registers: Long = statsFor(actorRef, dispatcher).registers.get(),
+ unregisters: Long = statsFor(actorRef, dispatcher).unregisters.get(),
+ msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(),
msgsProcessed: Long = statsFor(actorRef, dispatcher).msgsProcessed.get(),
- restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem) {
+ restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem) {
val stats = statsFor(actorRef, Option(dispatcher).getOrElse(actorRef.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].dispatcher))
val deadline = System.currentTimeMillis + 1000
try {
@@ -218,7 +218,8 @@ object ActorModelSpec {
await(deadline)(stats.restarts.get() == restarts)
} catch {
case e: Throwable ⇒
- system.eventStream.publish(Error(e,
+ system.eventStream.publish(Error(
+ e,
Option(dispatcher).toString,
(Option(dispatcher) getOrElse this).getClass,
"actual: " + stats + ", required: InterceptorStats(susp=" + suspensions +
@@ -529,7 +530,8 @@ object DispatcherModelSpec {
import akka.util.Helpers.ConfigOps
private val instance: MessageDispatcher =
- new Dispatcher(this,
+ new Dispatcher(
+ this,
config.getString("id"),
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),
@@ -602,7 +604,8 @@ object BalancingDispatcherModelSpec {
import akka.util.Helpers.ConfigOps
override protected def create(mailboxType: MailboxType): BalancingDispatcher =
- new BalancingDispatcher(this,
+ new BalancingDispatcher(
+ this,
config.getString("id"),
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),
diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala
index 386cd592e2..aecd44928d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala
@@ -104,15 +104,15 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
def ofType[T <: MessageDispatcher: ClassTag]: (MessageDispatcher) ⇒ Boolean = _.getClass == implicitly[ClassTag[T]].runtimeClass
def typesAndValidators: Map[String, (MessageDispatcher) ⇒ Boolean] = Map(
- "PinnedDispatcher" -> ofType[PinnedDispatcher],
- "Dispatcher" -> ofType[Dispatcher])
+ "PinnedDispatcher" → ofType[PinnedDispatcher],
+ "Dispatcher" → ofType[Dispatcher])
def validTypes = typesAndValidators.keys.toList
val defaultDispatcherConfig = settings.config.getConfig("akka.actor.default-dispatcher")
lazy val allDispatchers: Map[String, MessageDispatcher] = {
- validTypes.map(t ⇒ (t, from(ConfigFactory.parseMap(Map(tipe -> t, id -> t).asJava).
+ validTypes.map(t ⇒ (t, from(ConfigFactory.parseMap(Map(tipe → t, id → t).asJava).
withFallback(defaultDispatcherConfig)))).toMap
}
@@ -150,7 +150,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
"throw ConfigurationException if type does not exist" in {
intercept[ConfigurationException] {
- from(ConfigFactory.parseMap(Map(tipe -> "typedoesntexist", id -> "invalid-dispatcher").asJava).
+ from(ConfigFactory.parseMap(Map(tipe → "typedoesntexist", id → "invalid-dispatcher").asJava).
withFallback(defaultDispatcherConfig))
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
index d7209c65b0..147a11b43f 100644
--- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
@@ -125,55 +125,57 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn
q.hasMessages should ===(false)
}
- def testEnqueueDequeue(config: MailboxType,
- enqueueN: Int = 10000,
- dequeueN: Int = 10000,
- parallel: Boolean = true): Unit = within(10 seconds) {
+ def testEnqueueDequeue(
+ config: MailboxType,
+ enqueueN: Int = 10000,
+ dequeueN: Int = 10000,
+ parallel: Boolean = true): Unit = within(10 seconds) {
val q = factory(config)
ensureInitialMailboxState(config, q)
- EventFilter.warning(pattern = ".*received dead letter from Actor.*MailboxSpec/deadLetters.*",
+ EventFilter.warning(
+ pattern = ".*received dead letter from Actor.*MailboxSpec/deadLetters.*",
occurrences = (enqueueN - dequeueN)) intercept {
- def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn {
- val messages = Vector() ++ (for (i ← fromNum to toNum) yield createMessageInvocation(i))
- for (i ← messages) q.enqueue(testActor, i)
- messages
- }
-
- val producers = {
- val step = 500
- val ps = for (i ← (1 to enqueueN by step).toList) yield createProducer(i, Math.min(enqueueN, i + step - 1))
-
- if (parallel == false)
- ps foreach { Await.ready(_, remainingOrDefault) }
-
- ps
- }
-
- def createConsumer: Future[Vector[Envelope]] = spawn {
- var r = Vector[Envelope]()
-
- while (producers.exists(_.isCompleted == false) || q.hasMessages)
- Option(q.dequeue) foreach { message ⇒ r = r :+ message }
-
- r
- }
-
- val consumers = List.fill(maxConsumers)(createConsumer)
-
- val ps = producers.map(Await.result(_, remainingOrDefault))
- val cs = consumers.map(Await.result(_, remainingOrDefault))
-
- ps.map(_.size).sum should ===(enqueueN) //Must have produced 1000 messages
- cs.map(_.size).sum should ===(dequeueN) //Must have consumed all produced messages
- //No message is allowed to be consumed by more than one consumer
- cs.flatten.distinct.size should ===(dequeueN)
- //All consumed messages should have been produced
- (cs.flatten diff ps.flatten).size should ===(0)
- //The ones that were produced and not consumed
- (ps.flatten diff cs.flatten).size should ===(enqueueN - dequeueN)
+ def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn {
+ val messages = Vector() ++ (for (i ← fromNum to toNum) yield createMessageInvocation(i))
+ for (i ← messages) q.enqueue(testActor, i)
+ messages
}
+
+ val producers = {
+ val step = 500
+ val ps = for (i ← (1 to enqueueN by step).toList) yield createProducer(i, Math.min(enqueueN, i + step - 1))
+
+ if (parallel == false)
+ ps foreach { Await.ready(_, remainingOrDefault) }
+
+ ps
+ }
+
+ def createConsumer: Future[Vector[Envelope]] = spawn {
+ var r = Vector[Envelope]()
+
+ while (producers.exists(_.isCompleted == false) || q.hasMessages)
+ Option(q.dequeue) foreach { message ⇒ r = r :+ message }
+
+ r
+ }
+
+ val consumers = List.fill(maxConsumers)(createConsumer)
+
+ val ps = producers.map(Await.result(_, remainingOrDefault))
+ val cs = consumers.map(Await.result(_, remainingOrDefault))
+
+ ps.map(_.size).sum should ===(enqueueN) //Must have produced enqueueN messages
+ cs.map(_.size).sum should ===(dequeueN) //Must have consumed dequeueN messages
+ //No message is allowed to be consumed by more than one consumer
+ cs.flatten.distinct.size should ===(dequeueN)
+ //All consumed messages should have been produced
+ (cs.flatten diff ps.flatten).size should ===(0)
+ //The ones that were produced and not consumed
+ (ps.flatten diff cs.flatten).size should ===(enqueueN - dequeueN)
+ }
}
}
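
Note: the large hunk above is an indentation repair; the producer/consumer code always executed inside the `EventFilter.warning(...) intercept { ... }` block, it just was not indented that way. For reference, the intercept idiom from akka-testkit, as a minimal sketch (log text and pattern are illustrative only):

  import akka.actor.ActorSystem
  import akka.testkit.{ EventFilter, TestKit }
  import com.typesafe.config.ConfigFactory

  object InterceptDemo extends App {
    // EventFilter needs the TestEventListener installed as a logger
    implicit val system = ActorSystem("demo", ConfigFactory.parseString(
      """akka.loggers = ["akka.testkit.TestEventListener"]"""))

    // fails if the block does not log exactly one matching warning
    EventFilter.warning(pattern = ".*simulated.*", occurrences = 1) intercept {
      system.log.warning("simulated warning")
    }

    TestKit.shutdownActorSystem(system)
  }
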
diff --git a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala
index 11d04ec6b4..344eb161f2 100644
--- a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala
@@ -10,8 +10,8 @@ import org.scalatest.BeforeAndAfterEach
import akka.testkit._
import scala.concurrent.duration._
-import akka.actor.{ Props, Actor, ActorRef, ActorSystem, PoisonPill}
-import akka.japi.{ Procedure}
+import akka.actor.{ Props, Actor, ActorRef, ActorSystem, PoisonPill }
+import akka.japi.{ Procedure }
import com.typesafe.config.{ Config, ConfigFactory }
object EventBusSpec {
diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala
index a62ec761e8..88c5592863 100644
--- a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala
@@ -117,10 +117,10 @@ object LoggerSpec {
override def mdc(currentMessage: Any): MDC = {
reqId += 1
- val always = Map("requestId" -> reqId)
+ val always = Map("requestId" → reqId)
val cmim = "Current Message in MDC"
val perMessage = currentMessage match {
- case `cmim` ⇒ Map[String, Any]("currentMsg" -> cmim, "currentMsgLength" -> cmim.length)
+ case `cmim` ⇒ Map[String, Any]("currentMsg" → cmim, "currentMsgLength" → cmim.length)
case _ ⇒ Map()
}
always ++ perMessage
diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala
index 063ee72f9f..02e84f44d1 100644
--- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala
@@ -28,9 +28,9 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll {
akka.loglevel=DEBUG
akka.actor.serialize-messages = off # debug noise from serialization
""").withFallback(AkkaSpec.testConf)
- val appLogging = ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" -> true).asJava).withFallback(config))
- val appAuto = ActorSystem("autoreceive", ConfigFactory.parseMap(Map("akka.actor.debug.autoreceive" -> true).asJava).withFallback(config))
- val appLifecycle = ActorSystem("lifecycle", ConfigFactory.parseMap(Map("akka.actor.debug.lifecycle" -> true).asJava).withFallback(config))
+ val appLogging = ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" → true).asJava).withFallback(config))
+ val appAuto = ActorSystem("autoreceive", ConfigFactory.parseMap(Map("akka.actor.debug.autoreceive" → true).asJava).withFallback(config))
+ val appLifecycle = ActorSystem("lifecycle", ConfigFactory.parseMap(Map("akka.actor.debug.lifecycle" → true).asJava).withFallback(config))
val filter = TestEvent.Mute(EventFilter.custom {
case _: Logging.Debug ⇒ true
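
Note: the three ActorSystem lines above share one idiom: a Scala Map converted with `.asJava` for `ConfigFactory.parseMap`, then layered over a base config with `withFallback` (the overlay's keys win). A minimal sketch:

  import com.typesafe.config.ConfigFactory
  import scala.collection.JavaConverters._

  object ConfigLayerDemo extends App {
    val base = ConfigFactory.parseString("akka.loglevel = INFO")
    // parseMap takes a java.util.Map, hence asJava
    val overlay = ConfigFactory.parseMap(Map("akka.actor.debug.receive" → true).asJava)
    val merged = overlay.withFallback(base)
    println(merged.getBoolean("akka.actor.debug.receive")) // true
    println(merged.getString("akka.loglevel")) // INFO, from the fallback
  }
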
diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala
index dd324fb596..1dcacf1b0d 100644
--- a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala
@@ -886,10 +886,11 @@ class TcpConnectionSpec extends AkkaSpec("""
def setServerSocketOptions() = ()
- def createConnectionActor(serverAddress: InetSocketAddress = serverAddress,
- options: immutable.Seq[SocketOption] = Nil,
- timeout: Option[FiniteDuration] = None,
- pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = {
+ def createConnectionActor(
+ serverAddress: InetSocketAddress = serverAddress,
+ options: immutable.Seq[SocketOption] = Nil,
+ timeout: Option[FiniteDuration] = None,
+ pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = {
val ref = createConnectionActorWithoutRegistration(serverAddress, options, timeout, pullMode)
ref ! newChannelRegistration
ref
@@ -901,10 +902,11 @@ class TcpConnectionSpec extends AkkaSpec("""
def disableInterest(op: Int): Unit = interestCallReceiver.ref ! -op
}
- def createConnectionActorWithoutRegistration(serverAddress: InetSocketAddress = serverAddress,
- options: immutable.Seq[SocketOption] = Nil,
- timeout: Option[FiniteDuration] = None,
- pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] =
+ def createConnectionActorWithoutRegistration(
+ serverAddress: InetSocketAddress = serverAddress,
+ options: immutable.Seq[SocketOption] = Nil,
+ timeout: Option[FiniteDuration] = None,
+ pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] =
TestActorRef(
new TcpOutgoingConnection(Tcp(system), this, userHandler.ref,
Connect(serverAddress, options = options, timeout = timeout, pullMode = pullMode)) {
@@ -931,8 +933,8 @@ class TcpConnectionSpec extends AkkaSpec("""
abstract class EstablishedConnectionTest(
keepOpenOnPeerClosed: Boolean = false,
- useResumeWriting: Boolean = true,
- pullMode: Boolean = false)
+ useResumeWriting: Boolean = true,
+ pullMode: Boolean = false)
extends UnacceptedConnectionTest(pullMode) {
// lazy init since potential exceptions should not be triggered in the constructor but during execution of `run`
@@ -1074,7 +1076,7 @@ class TcpConnectionSpec extends AkkaSpec("""
}
val interestsNames =
- Seq(OP_ACCEPT -> "accepting", OP_CONNECT -> "connecting", OP_READ -> "reading", OP_WRITE -> "writing")
+ Seq(OP_ACCEPT → "accepting", OP_CONNECT → "connecting", OP_READ → "reading", OP_WRITE → "writing")
def interestsDesc(interests: Int): String =
interestsNames.filter(i ⇒ (i._1 & interests) != 0).map(_._2).mkString(", ")
diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala
index 50ab314c86..4834a4a193 100644
--- a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala
@@ -185,11 +185,11 @@ class TcpIntegrationSpec extends AkkaSpec("""
}
def chitchat(
- clientHandler: TestProbe,
+ clientHandler: TestProbe,
clientConnection: ActorRef,
- serverHandler: TestProbe,
+ serverHandler: TestProbe,
serverConnection: ActorRef,
- rounds: Int = 100) = {
+ rounds: Int = 100) = {
val testData = ByteString(0)
(1 to rounds) foreach { _ ⇒
diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
index 8838af1e6b..088026f8c1 100644
--- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala
@@ -213,7 +213,7 @@ class AskSpec extends AkkaSpec {
val act = system.actorOf(Props(new Actor {
def receive = {
- case msg ⇒ p.ref ! sender() -> msg
+ case msg ⇒ p.ref ! sender() → msg
}
}))
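
Note: the `sender() → msg` line above forwards a (sender, message) pair to a probe so the test can assert on both at once. A self-contained sketch of the pattern:

  import akka.actor.{ Actor, ActorSystem, Props }
  import akka.testkit.TestProbe

  object SenderPairDemo extends App {
    implicit val system = ActorSystem("demo")
    val p = TestProbe()

    val act = system.actorOf(Props(new Actor {
      def receive = { case msg ⇒ p.ref ! sender() → msg }
    }))

    act.tell("hello", p.ref)     // send with the probe as sender
    p.expectMsg(p.ref → "hello") // the probe sees the (sender, message) pair

    system.terminate()
  }
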
diff --git a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala
index 4a43515fa3..ee10ccd5f1 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala
@@ -44,9 +44,10 @@ object MetricsBasedResizerSpec {
var msgs: Set[TestLatch] = Set()
- def mockSend(await: Boolean,
- l: TestLatch = TestLatch(),
- routeeIdx: Int = Random.nextInt(routees.length)): Latches = {
+ def mockSend(
+ await: Boolean,
+ l: TestLatch = TestLatch(),
+ routeeIdx: Int = Random.nextInt(routees.length)): Latches = {
val target = routees(routeeIdx)
val first = TestLatch()
val latches = Latches(first, l)
diff --git a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala
index cf94967653..3dd1b2b0f0 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala
@@ -50,7 +50,7 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
val counter = new AtomicInteger
var replies = Map.empty[Int, Int]
for (i ← 0 until connectionCount) {
- replies = replies + (i -> 0)
+ replies = replies + (i → 0)
}
val actor = system.actorOf(RandomPool(connectionCount).props(routeeProps =
@@ -65,7 +65,7 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (i ← 0 until iterationCount) {
for (k ← 0 until connectionCount) {
val id = Await.result((actor ? "hit").mapTo[Int], timeout.duration)
- replies = replies + (id -> (replies(id) + 1))
+ replies = replies + (id → (replies(id) + 1))
}
}
diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala
index e531d7c682..4d94f427f9 100644
--- a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala
@@ -64,7 +64,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (_ ← 1 to iterationCount; _ ← 1 to connectionCount) {
val id = Await.result((actor ? "hit").mapTo[Int], timeout.duration)
- replies = replies + (id -> (replies(id) + 1))
+ replies = replies + (id → (replies(id) + 1))
}
counter.get should ===(connectionCount)
@@ -138,7 +138,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (_ ← 1 to iterationCount; _ ← 1 to connectionCount) {
val id = Await.result((actor ? "hit").mapTo[String], timeout.duration)
- replies = replies + (id -> (replies(id) + 1))
+ replies = replies + (id → (replies(id) + 1))
}
actor ! akka.routing.Broadcast("end")
@@ -184,7 +184,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (_ ← 1 to iterationCount; _ ← 1 to connectionCount) {
val id = Await.result((actor ? "hit").mapTo[String], timeout.duration)
- replies = replies + (id -> (replies(id) + 1))
+ replies = replies + (id → (replies(id) + 1))
}
watch(actor)
diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala
index fa76783b5c..701d72c16a 100644
--- a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala
@@ -323,7 +323,8 @@ class SerializationCompatibilitySpec extends AkkaSpec(SerializationTests.mostlyR
"be preserved for the Create SystemMessage" in {
// Using null as the cause to avoid a large serialized message and JDK differences
- verify(Create(Some(null)),
+ verify(
+ Create(Some(null)),
if (scala.util.Properties.versionNumberString.startsWith("2.10.")) {
"aced00057372001b616b6b612e64697370617463682e7379736d73672e4372656174650000000000" +
"0000010200014c00076661696c75726574000e4c7363616c612f4f7074696f6e3b78707372000a73" +
@@ -337,53 +338,62 @@ class SerializationCompatibilitySpec extends AkkaSpec(SerializationTests.mostlyR
})
}
"be preserved for the Recreate SystemMessage" in {
- verify(Recreate(null),
+ verify(
+ Recreate(null),
"aced00057372001d616b6b612e64697370617463682e7379736d73672e5265637265617465000000" +
"00000000010200014c000563617573657400154c6a6176612f6c616e672f5468726f7761626c653b" +
"787070")
}
"be preserved for the Suspend SystemMessage" in {
- verify(Suspend(),
+ verify(
+ Suspend(),
"aced00057372001c616b6b612e64697370617463682e7379736d73672e53757370656e6400000000" +
"000000010200007870")
}
"be preserved for the Resume SystemMessage" in {
- verify(Resume(null),
+ verify(
+ Resume(null),
"aced00057372001b616b6b612e64697370617463682e7379736d73672e526573756d650000000000" +
"0000010200014c000f63617573656442794661696c7572657400154c6a6176612f6c616e672f5468" +
"726f7761626c653b787070")
}
"be preserved for the Terminate SystemMessage" in {
- verify(Terminate(),
+ verify(
+ Terminate(),
"aced00057372001e616b6b612e64697370617463682e7379736d73672e5465726d696e6174650000" +
"0000000000010200007870")
}
"be preserved for the Supervise SystemMessage" in {
- verify(Supervise(null, true),
+ verify(
+ Supervise(null, true),
"aced00057372001e616b6b612e64697370617463682e7379736d73672e5375706572766973650000" +
"0000000000010200025a00056173796e634c00056368696c647400154c616b6b612f6163746f722f" +
"4163746f725265663b78700170")
}
"be preserved for the Watch SystemMessage" in {
- verify(Watch(null, null),
+ verify(
+ Watch(null, null),
"aced00057372001a616b6b612e64697370617463682e7379736d73672e5761746368000000000000" +
"00010200024c00077761746368656574001d4c616b6b612f6163746f722f496e7465726e616c4163" +
"746f725265663b4c00077761746368657271007e000178707070")
}
"be preserved for the Unwatch SystemMessage" in {
- verify(Unwatch(null, null),
+ verify(
+ Unwatch(null, null),
"aced00057372001c616b6b612e64697370617463682e7379736d73672e556e776174636800000000" +
"000000010200024c0007776174636865657400154c616b6b612f6163746f722f4163746f72526566" +
"3b4c00077761746368657271007e000178707070")
}
"be preserved for the NoMessage SystemMessage" in {
- verify(NoMessage,
+ verify(
+ NoMessage,
"aced00057372001f616b6b612e64697370617463682e7379736d73672e4e6f4d6573736167652400" +
"000000000000010200007870")
}
"be preserved for the Failed SystemMessage" in {
// Using null as the cause to avoid a large serialized message and JDK differences
- verify(Failed(null, cause = null, uid = 0),
+ verify(
+ Failed(null, cause = null, uid = 0),
"aced00057372001b616b6b612e64697370617463682e7379736d73672e4661696c65640000000000" +
"0000010200034900037569644c000563617573657400154c6a6176612f6c616e672f5468726f7761" +
"626c653b4c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b78700000" +
diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
index b1291e52d4..57dac8c482 100644
--- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala
@@ -121,7 +121,7 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers {
val (bsAIt, bsBIt) = (a.iterator, b.iterator)
val (vecAIt, vecBIt) = (Vector(a: _*).iterator.buffered, Vector(b: _*).iterator.buffered)
(body(bsAIt, bsBIt) == body(vecAIt, vecBIt)) &&
- (!strict || (bsAIt.toSeq -> bsBIt.toSeq) == (vecAIt.toSeq -> vecBIt.toSeq))
+ (!strict || (bsAIt.toSeq → bsBIt.toSeq) == (vecAIt.toSeq → vecBIt.toSeq))
}
def likeVecBld(body: Builder[Byte, _] ⇒ Unit): Boolean = {
diff --git a/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala
index c34c6f26fe..ead93a64e3 100644
--- a/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala
@@ -15,16 +15,16 @@ class PrettyDurationSpec extends FlatSpec with Matchers {
import scala.concurrent.duration._
val cases: Seq[(Duration, String)] =
- 9.nanos -> "9.000 ns" ::
- 95.nanos -> "95.00 ns" ::
- 999.nanos -> "999.0 ns" ::
- 1000.nanos -> "1.000 μs" ::
- 9500.nanos -> "9.500 μs" ::
- 9500.micros -> "9.500 ms" ::
- 9500.millis -> "9.500 s" ::
- 95.seconds -> "1.583 min" ::
- 95.minutes -> "1.583 h" ::
- 95.hours -> "3.958 d" ::
+ 9.nanos → "9.000 ns" ::
+ 95.nanos → "95.00 ns" ::
+ 999.nanos → "999.0 ns" ::
+ 1000.nanos → "1.000 μs" ::
+ 9500.nanos → "9.500 μs" ::
+ 9500.micros → "9.500 ms" ::
+ 9500.millis → "9.500 s" ::
+ 95.seconds → "1.583 min" ::
+ 95.minutes → "1.583 h" ::
+ 95.hours → "3.958 d" ::
Nil
cases foreach {
diff --git a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala
index 50c5900202..62c0c86ad0 100644
--- a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala
+++ b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala
@@ -66,9 +66,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param stateTimeout default state timeout for this state
* @param stateFunctionBuilder partial function builder describing response to input
*/
- final def when(stateName: S,
- stateTimeout: FiniteDuration,
- stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit =
+ final def when(
+ stateName: S,
+ stateTimeout: FiniteDuration,
+ stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit =
when(stateName, stateTimeout)(stateFunctionBuilder.build())
/**
diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index dde2ae4e07..7be9612952 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -96,7 +96,7 @@ final case class ActorIdentity(correlationId: Any, ref: Option[ActorRef]) {
@SerialVersionUID(1L)
final case class Terminated private[akka] (@BeanProperty actor: ActorRef)(
@BeanProperty val existenceConfirmed: Boolean,
- @BeanProperty val addressTerminated: Boolean)
+ @BeanProperty val addressTerminated: Boolean)
extends AutoReceivedMessage with PossiblyHarmful with DeadLetterSuppression
/**
@@ -189,7 +189,8 @@ object ActorInitializationException {
*/
@SerialVersionUID(1L)
final case class PreRestartException private[akka] (actor: ActorRef, cause: Throwable, originalCause: Throwable, messageOption: Option[Any])
- extends ActorInitializationException(actor,
+ extends ActorInitializationException(
+ actor,
"exception in preRestart(" +
(if (originalCause == null) "null" else originalCause.getClass) + ", " +
(messageOption match { case Some(m: AnyRef) ⇒ m.getClass; case _ ⇒ "None" }) +
@@ -205,7 +206,8 @@ final case class PreRestartException private[akka] (actor: ActorRef, cause: Thro
*/
@SerialVersionUID(1L)
final case class PostRestartException private[akka] (actor: ActorRef, cause: Throwable, originalCause: Throwable)
- extends ActorInitializationException(actor,
+ extends ActorInitializationException(
+ actor,
"exception post restart (" + (if (originalCause == null) "null" else originalCause.getClass) + ")", cause)
/**
diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala
index e5753a4c08..4dea299d93 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala
@@ -372,11 +372,11 @@ private[akka] object ActorCell {
* for! (waves hand)
*/
private[akka] class ActorCell(
- val system: ActorSystemImpl,
- val self: InternalActorRef,
+ val system: ActorSystemImpl,
+ val self: InternalActorRef,
final val props: Props, // Must be final so that it can be properly cleared in clearActorCellFields
- val dispatcher: MessageDispatcher,
- val parent: InternalActorRef)
+ val dispatcher: MessageDispatcher,
+ val parent: InternalActorRef)
extends UntypedActorContext with AbstractActorContext with Cell
with dungeon.ReceiveTimeout
with dungeon.Children
@@ -598,7 +598,8 @@ private[akka] class ActorCell(
case NonFatal(e) ⇒
clearOutActorIfNonNull()
e match {
- case i: InstantiationException ⇒ throw ActorInitializationException(self,
+ case i: InstantiationException ⇒ throw ActorInitializationException(
+ self,
"""exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either,
a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new Creator ... )
or is missing an appropriate, reachable no-args constructor.
diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala
index 4336464054..989c4af7d3 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala
@@ -254,7 +254,8 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable {
*/
@SerialVersionUID(1L)
final case class RootActorPath(address: Address, name: String = "/") extends ActorPath {
- require(name.length == 1 || name.indexOf('/', 1) == -1,
+ require(
+ name.length == 1 || name.indexOf('/', 1) == -1,
"/ may only exist at the beginning of the root actors name, " +
"it is a path separator and is not legal in ActorPath names: [%s]" format name)
require(name.indexOf('#') == -1, "# is a fragment separator and is not legal in ActorPath names: [%s]" format name)
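
Note: the `require` reflow above keeps its two-argument form; Predef.require takes the message by name, so the concatenated string is only built when the check fails. A small sketch of the same path rule:

  object RequireDemo extends App {
    def checkName(name: String): Unit =
      require(
        name.length == 1 || name.indexOf('/', 1) == -1,
        s"/ may only exist at the beginning of the name: [$name]") // built only on failure

    checkName("/")     // ok: the root name itself
    checkName("/user") // ok: '/' only at index 0
    // checkName("/a/b") would throw IllegalArgumentException
  }
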
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
index 0e2cc5bc17..523a12e44b 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala
@@ -302,11 +302,11 @@ private[akka] case object Nobody extends MinimalActorRef {
* INTERNAL API
*/
private[akka] class LocalActorRef private[akka] (
- _system: ActorSystemImpl,
- _props: Props,
- _dispatcher: MessageDispatcher,
- _mailboxType: MailboxType,
- _supervisor: InternalActorRef,
+ _system: ActorSystemImpl,
+ _props: Props,
+ _dispatcher: MessageDispatcher,
+ _mailboxType: MailboxType,
+ _supervisor: InternalActorRef,
override val path: ActorPath)
extends ActorRefWithCell with LocalRef {
@@ -518,9 +518,10 @@ private[akka] object DeadLetterActorRef {
*
* INTERNAL API
*/
-private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider,
- override val path: ActorPath,
- val eventStream: EventStream) extends MinimalActorRef {
+private[akka] class EmptyLocalActorRef(
+ override val provider: ActorRefProvider,
+ override val path: ActorPath,
+ val eventStream: EventStream) extends MinimalActorRef {
@deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2")
override private[akka] def isTerminated = true
@@ -570,9 +571,10 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider,
*
* INTERNAL API
*/
-private[akka] class DeadLetterActorRef(_provider: ActorRefProvider,
- _path: ActorPath,
- _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) {
+private[akka] class DeadLetterActorRef(
+ _provider: ActorRefProvider,
+ _path: ActorPath,
+ _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) {
override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match {
case null ⇒ throw new InvalidMessageException("Message is null")
@@ -601,10 +603,10 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider,
* INTERNAL API
*/
private[akka] class VirtualPathContainer(
- override val provider: ActorRefProvider,
- override val path: ActorPath,
+ override val provider: ActorRefProvider,
+ override val path: ActorPath,
override val getParent: InternalActorRef,
- val log: LoggingAdapter) extends MinimalActorRef {
+ val log: LoggingAdapter) extends MinimalActorRef {
private val children = new ConcurrentHashMap[String, InternalActorRef]
@@ -705,10 +707,11 @@ private[akka] class VirtualPathContainer(
* When using the watch() feature you must ensure that upon reception of the
* Terminated message the watched actorRef is unwatch()ed.
*/
-private[akka] final class FunctionRef(override val path: ActorPath,
- override val provider: ActorRefProvider,
- val eventStream: EventStream,
- f: (ActorRef, Any) ⇒ Unit) extends MinimalActorRef {
+private[akka] final class FunctionRef(
+ override val path: ActorPath,
+ override val provider: ActorRefProvider,
+ val eventStream: EventStream,
+ f: (ActorRef, Any) ⇒ Unit) extends MinimalActorRef {
override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = {
f(sender, message)
diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
index 2dc6aed149..bf99880055 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala
@@ -105,14 +105,14 @@ trait ActorRefProvider {
* the latter can be suppressed by setting ``lookupDeploy`` to ``false``.
*/
def actorOf(
- system: ActorSystemImpl,
- props: Props,
- supervisor: InternalActorRef,
- path: ActorPath,
+ system: ActorSystemImpl,
+ props: Props,
+ supervisor: InternalActorRef,
+ path: ActorPath,
systemService: Boolean,
- deploy: Option[Deploy],
- lookupDeploy: Boolean,
- async: Boolean): InternalActorRef
+ deploy: Option[Deploy],
+ lookupDeploy: Boolean,
+ async: Boolean): InternalActorRef
/**
* INTERNAL API
@@ -475,20 +475,22 @@ private[akka] object LocalActorRefProvider {
* Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported.
*/
private[akka] class LocalActorRefProvider private[akka] (
- _systemName: String,
+ _systemName: String,
override val settings: ActorSystem.Settings,
- val eventStream: EventStream,
- val dynamicAccess: DynamicAccess,
+ val eventStream: EventStream,
+ val dynamicAccess: DynamicAccess,
override val deployer: Deployer,
- _deadLetters: Option[ActorPath ⇒ InternalActorRef])
+ _deadLetters: Option[ActorPath ⇒ InternalActorRef])
extends ActorRefProvider {
// this is the constructor needed for reflectively instantiating the provider
- def this(_systemName: String,
- settings: ActorSystem.Settings,
- eventStream: EventStream,
- dynamicAccess: DynamicAccess) =
- this(_systemName,
+ def this(
+ _systemName: String,
+ settings: ActorSystem.Settings,
+ eventStream: EventStream,
+ dynamicAccess: DynamicAccess) =
+ this(
+ _systemName,
settings,
eventStream,
dynamicAccess,
@@ -776,7 +778,8 @@ private[akka] class LocalActorRefProvider private[akka] (
if (!system.dispatchers.hasDispatcher(r.routerDispatcher))
throw new ConfigurationException(s"Dispatcher [${p.dispatcher}] not configured for router of $path")
- val routerProps = Props(p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher),
+ val routerProps = Props(
+ p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher),
classOf[RoutedActorCell.RouterActorCreator], Vector(p.routerConfig))
val routeeProps = p.withRouter(NoRouter)
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
index 278bcf0d43..d013f20ad0 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala
@@ -218,7 +218,8 @@ object ActorSelection {
if (matchingChildren.isEmpty && !sel.wildcardFanOut)
emptyRef.tell(sel, sender)
else {
- val m = sel.copy(elements = iter.toVector,
+ val m = sel.copy(
+ elements = iter.toVector,
wildcardFanOut = sel.wildcardFanOut || matchingChildren.size > 1)
matchingChildren.foreach(c ⇒ deliverSelection(c.asInstanceOf[InternalActorRef], sender, m))
}
@@ -253,8 +254,8 @@ trait ScalaActorSelection {
*/
@SerialVersionUID(2L) // it has protobuf serialization in akka-remote
private[akka] final case class ActorSelectionMessage(
- msg: Any,
- elements: immutable.Iterable[SelectionPathElement],
+ msg: Any,
+ elements: immutable.Iterable[SelectionPathElement],
wildcardFanOut: Boolean)
extends AutoReceivedMessage with PossiblyHarmful {
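
For context, `wildcardFanOut` ends up true whenever a selection matches several children, which is what the copied message above tracks. A minimal usage sketch, assuming an ActorSystem `system` with a `parent` actor:

  // wildcard selections fan out to every matching child of /user/parent
  val selection = system.actorSelection("/user/parent/*")
  selection ! "hello"
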
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
index a981f32500..c0789e1481 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
@@ -505,11 +505,11 @@ abstract class ExtendedActorSystem extends ActorSystem {
}
private[akka] class ActorSystemImpl(
- val name: String,
- applicationConfig: Config,
- classLoader: ClassLoader,
+ val name: String,
+ applicationConfig: Config,
+ classLoader: ClassLoader,
defaultExecutionContext: Option[ExecutionContext],
- val guardianProps: Option[Props]) extends ExtendedActorSystem {
+ val guardianProps: Option[Props]) extends ExtendedActorSystem {
if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-_]*$"""))
throw new IllegalArgumentException(
@@ -593,7 +593,7 @@ private[akka] class ActorSystemImpl(
eventStream.startStdoutLogger(settings)
val logFilter: LoggingFilter = {
- val arguments = Vector(classOf[Settings] -> settings, classOf[EventStream] -> eventStream)
+ val arguments = Vector(classOf[Settings] → settings, classOf[EventStream] → eventStream)
dynamicAccess.createInstanceFor[LoggingFilter](LoggingFilter, arguments).get
}
@@ -603,10 +603,10 @@ private[akka] class ActorSystemImpl(
val provider: ActorRefProvider = try {
val arguments = Vector(
- classOf[String] -> name,
- classOf[Settings] -> settings,
- classOf[EventStream] -> eventStream,
- classOf[DynamicAccess] -> dynamicAccess)
+ classOf[String] → name,
+ classOf[Settings] → settings,
+ classOf[EventStream] → eventStream,
+ classOf[DynamicAccess] → dynamicAccess)
dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get
} catch {
@@ -698,9 +698,9 @@ private[akka] class ActorSystemImpl(
*/
protected def createScheduler(): Scheduler =
dynamicAccess.createInstanceFor[Scheduler](settings.SchedulerClass, immutable.Seq(
- classOf[Config] -> settings.config,
- classOf[LoggingAdapter] -> log,
- classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler"))).get
+ classOf[Config] → settings.config,
+ classOf[LoggingAdapter] → log,
+ classOf[ThreadFactory] → threadFactory.withName(threadFactory.name + "-scheduler"))).get
//#create-scheduler
/*
@@ -767,12 +767,12 @@ private[akka] class ActorSystemImpl(
def loadExtensions(key: String, throwOnLoadFail: Boolean): Unit = {
immutableSeq(settings.config.getStringList(key)) foreach { fqcn ⇒
dynamicAccess.getObjectFor[AnyRef](fqcn) recoverWith { case _ ⇒ dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil) } match {
- case Success(p: ExtensionIdProvider) ⇒ registerExtension(p.lookup())
- case Success(p: ExtensionId[_]) ⇒ registerExtension(p)
- case Success(other)⇒
+ case Success(p: ExtensionIdProvider) ⇒ registerExtension(p.lookup())
+ case Success(p: ExtensionId[_]) ⇒ registerExtension(p)
+ case Success(other) ⇒
if (!throwOnLoadFail) log.error("[{}] is not an 'ExtensionIdProvider' or 'ExtensionId', skipping...", fqcn)
else throw new RuntimeException(s"[$fqcn] is not an 'ExtensionIdProvider' or 'ExtensionId'")
- case Failure(problem) ⇒
+ case Failure(problem) ⇒
if (!throwOnLoadFail) log.error(problem, "While trying to load extension [{}], skipping...", fqcn)
else throw new RuntimeException(s"While trying to load extension [$fqcn]", problem)
}
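
The `classOf[X] → value` pairs used throughout this file drive reflective construction: `DynamicAccess.createInstanceFor` matches them positionally against a constructor. A sketch, assuming `dynamicAccess`, `settings` and `eventStream` are in scope and `com.example.MyLoggingFilter` is a hypothetical class with a `(Settings, EventStream)` constructor:

  import akka.actor.ActorSystem.Settings
  import akka.event.{ EventStream, LoggingFilter }

  val args = Vector(classOf[Settings] → settings, classOf[EventStream] → eventStream)
  val filter =
    dynamicAccess.createInstanceFor[LoggingFilter]("com.example.MyLoggingFilter", args).get
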
diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala
index 7f71bb072a..9431139801 100644
--- a/akka-actor/src/main/scala/akka/actor/Deployer.scala
+++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala
@@ -35,12 +35,12 @@ object Deploy {
*/
@SerialVersionUID(2L)
final case class Deploy(
- path: String = "",
- config: Config = ConfigFactory.empty,
+ path: String = "",
+ config: Config = ConfigFactory.empty,
routerConfig: RouterConfig = NoRouter,
- scope: Scope = NoScopeGiven,
- dispatcher: String = Deploy.NoDispatcherGiven,
- mailbox: String = Deploy.NoMailboxGiven) {
+ scope: Scope = NoScopeGiven,
+ dispatcher: String = Deploy.NoDispatcherGiven,
+ mailbox: String = Deploy.NoMailboxGiven) {
/**
* Java API to create a Deploy with the given RouterConfig
@@ -137,7 +137,7 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce
protected val default = config.getConfig("default")
val routerTypeMapping: Map[String, String] =
settings.config.getConfig("akka.actor.router.type-mapping").root.unwrapped.asScala.collect {
- case (key, value: String) ⇒ (key -> value)
+ case (key, value: String) ⇒ (key → value)
}.toMap
config.root.asScala flatMap {
@@ -198,8 +198,8 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce
s"[${args(0)._1.getName}] and optional [${args(1)._1.getName}] parameter", cause)
// first try with Config param, and then with Config and DynamicAccess parameters
- val args1 = List(classOf[Config] -> deployment2)
- val args2 = List(classOf[Config] -> deployment2, classOf[DynamicAccess] -> dynamicAccess)
+ val args1 = List(classOf[Config] → deployment2)
+ val args2 = List(classOf[Config] → deployment2, classOf[DynamicAccess] → dynamicAccess)
dynamicAccess.createInstanceFor[RouterConfig](fqn, args1).recover({
case e @ (_: IllegalArgumentException | _: ConfigException) ⇒ throw e
case e: NoSuchMethodException ⇒
diff --git a/akka-actor/src/main/scala/akka/actor/Extension.scala b/akka-actor/src/main/scala/akka/actor/Extension.scala
index 175e839143..077785c28f 100644
--- a/akka-actor/src/main/scala/akka/actor/Extension.scala
+++ b/akka-actor/src/main/scala/akka/actor/Extension.scala
@@ -150,5 +150,5 @@ abstract class ExtensionKey[T <: Extension](implicit m: ClassTag[T]) extends Ext
def this(clazz: Class[T]) = this()(ClassTag(clazz))
override def lookup(): ExtensionId[T] = this
- def createExtension(system: ExtendedActorSystem): T = system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] -> system)).get
+ def createExtension(system: ExtendedActorSystem): T = system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] → system)).get
}
diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala
index bb0c35145e..242083a730 100644
--- a/akka-actor/src/main/scala/akka/actor/FSM.scala
+++ b/akka-actor/src/main/scala/akka/actor/FSM.scala
@@ -110,9 +110,10 @@ object FSM {
 * This extractor is just a convenience for matching a (S, S) pair, including a
 * reminder of what the new state is.
*/
- object -> {
+ object `->` {
def unapply[S](in: (S, S)) = Some(in)
}
+ val `→` = `->`
/**
* Log Entry of the [[akka.actor.LoggingFSM]], can be obtained by calling `getLog`.
@@ -319,7 +320,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
 * This extractor is just a convenience for matching a (S, S) pair, including a
 * reminder of what the new state is.
*/
- val -> = FSM.->
+ val `->` = FSM.`->`
/**
* This case object is received in case of a state timeout.
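
The backticks above only change how the extractor is declared, not how it is used; transition matching in an FSM subclass reads as before. A minimal sketch with hypothetical states `Idle` and `Active`:

  onTransition {
    case Idle -> Active ⇒ log.info("became active")
    case Active -> Idle ⇒ log.info("became idle") // FSM.`→` is the unicode alias added above
  }
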
diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
index e2e38d7eb8..186888d725 100644
--- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
+++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
@@ -380,9 +380,9 @@ abstract class SupervisorStrategy {
 * @param loggingEnabled the strategy logs the failure if this is enabled (true); by default it is enabled
*/
case class AllForOneStrategy(
- maxNrOfRetries: Int = -1,
- withinTimeRange: Duration = Duration.Inf,
- override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
+ maxNrOfRetries: Int = -1,
+ withinTimeRange: Duration = Duration.Inf,
+ override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
extends SupervisorStrategy {
import SupervisorStrategy._
@@ -458,9 +458,9 @@ case class AllForOneStrategy(
 * @param loggingEnabled the strategy logs the failure if this is enabled (true); by default it is enabled
*/
case class OneForOneStrategy(
- maxNrOfRetries: Int = -1,
- withinTimeRange: Duration = Duration.Inf,
- override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
+ maxNrOfRetries: Int = -1,
+ withinTimeRange: Duration = Duration.Inf,
+ override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
extends SupervisorStrategy {
/**
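
Both strategies keep the `decider` in a second parameter list, so call sites are untouched by the re-indentation. A sketch of a typical declaration inside a supervising actor:

  import akka.actor.OneForOneStrategy
  import akka.actor.SupervisorStrategy._
  import scala.concurrent.duration._

  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 1.minute) {
      case _: ArithmeticException ⇒ Resume
      case _: Exception           ⇒ Restart
    }
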
diff --git a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala
index bb9a627b22..4f62dbe812 100644
--- a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala
+++ b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala
@@ -34,9 +34,10 @@ import akka.dispatch.AbstractNodeQueue
* scheduled possibly one tick later than they could be (if checking that
* “now() + delay <= nextTick” were done).
*/
-class LightArrayRevolverScheduler(config: Config,
- log: LoggingAdapter,
- threadFactory: ThreadFactory)
+class LightArrayRevolverScheduler(
+ config: Config,
+ log: LoggingAdapter,
+ threadFactory: ThreadFactory)
extends Scheduler with Closeable {
import Helpers.Requiring
@@ -88,9 +89,10 @@ class LightArrayRevolverScheduler(config: Config,
}
}
- override def schedule(initialDelay: FiniteDuration,
- delay: FiniteDuration,
- runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = {
+ override def schedule(
+ initialDelay: FiniteDuration,
+ delay: FiniteDuration,
+ runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = {
checkMaxDelay(roundUp(delay).toNanos)
val preparedEC = executor.prepare()
try new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self ⇒
@@ -221,7 +223,7 @@ class LightArrayRevolverScheduler(config: Config,
time - start + // calculate the nanos since timer start
(ticks * tickNanos) + // adding the desired delay
tickNanos - 1 // rounding up
- ) / tickNanos).toInt // and converting to slot number
+ ) / tickNanos).toInt // and converting to slot number
// tick is an Int that will wrap around, but toInt of futureTick gives us modulo operations
// and the difference (offset) will be correct in any case
val offset = futureTick - tick
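
The expression above is plain ceiling division on the wheel's tick size. A worked sketch with hypothetical numbers:

  val tickNanos = 10000000L         // 10 ms per wheel tick
  val elapsedPlusDelay = 25000000L  // task due 25 ms after timer start
  // (25 ms + 10 ms - 1 ns) / 10 ms = 3, i.e. 2.5 ticks rounded up
  val futureTick = ((elapsedPlusDelay + tickNanos - 1) / tickNanos).toInt
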
diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala
index 660dea44e3..9b074b6c9d 100644
--- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala
+++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala
@@ -24,12 +24,12 @@ import scala.util.control.NonFatal
* and swap out the cell ref.
*/
private[akka] class RepointableActorRef(
- val system: ActorSystemImpl,
- val props: Props,
- val dispatcher: MessageDispatcher,
+ val system: ActorSystemImpl,
+ val props: Props,
+ val dispatcher: MessageDispatcher,
val mailboxType: MailboxType,
- val supervisor: InternalActorRef,
- val path: ActorPath)
+ val supervisor: InternalActorRef,
+ val path: ActorPath)
extends ActorRefWithCell with RepointableRef {
import AbstractActorRef.{ cellOffset, lookupOffset }
@@ -176,10 +176,11 @@ private[akka] class RepointableActorRef(
protected def writeReplace(): AnyRef = SerializedActorRef(this)
}
-private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl,
- val self: RepointableActorRef,
- val props: Props,
- val supervisor: InternalActorRef) extends Cell {
+private[akka] class UnstartedCell(
+ val systemImpl: ActorSystemImpl,
+ val self: RepointableActorRef,
+ val props: Props,
+ val supervisor: InternalActorRef) extends Cell {
/*
* This lock protects all accesses to this cell’s queues. It also ensures
diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
index 9bca96a592..56ee193990 100644
--- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala
+++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
@@ -42,10 +42,11 @@ trait Scheduler {
*/
final def schedule(
initialDelay: FiniteDuration,
- interval: FiniteDuration,
- receiver: ActorRef,
- message: Any)(implicit executor: ExecutionContext,
- sender: ActorRef = Actor.noSender): Cancellable =
+ interval: FiniteDuration,
+ receiver: ActorRef,
+ message: Any)(implicit
+ executor: ExecutionContext,
+ sender: ActorRef = Actor.noSender): Cancellable =
schedule(initialDelay, interval, new Runnable {
def run = {
receiver ! message
@@ -71,8 +72,9 @@ trait Scheduler {
*/
final def schedule(
initialDelay: FiniteDuration,
- interval: FiniteDuration)(f: ⇒ Unit)(
- implicit executor: ExecutionContext): Cancellable =
+ interval: FiniteDuration)(f: ⇒ Unit)(
+ implicit
+ executor: ExecutionContext): Cancellable =
schedule(initialDelay, interval, new Runnable { override def run = f })
/**
@@ -93,8 +95,8 @@ trait Scheduler {
*/
def schedule(
initialDelay: FiniteDuration,
- interval: FiniteDuration,
- runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
+ interval: FiniteDuration,
+ runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
/**
* Schedules a message to be sent once with a delay, i.e. a time period that has
@@ -103,10 +105,11 @@ trait Scheduler {
* Java & Scala API
*/
final def scheduleOnce(
- delay: FiniteDuration,
+ delay: FiniteDuration,
receiver: ActorRef,
- message: Any)(implicit executor: ExecutionContext,
- sender: ActorRef = Actor.noSender): Cancellable =
+ message: Any)(implicit
+ executor: ExecutionContext,
+ sender: ActorRef = Actor.noSender): Cancellable =
scheduleOnce(delay, new Runnable {
override def run = receiver ! message
})
@@ -118,7 +121,8 @@ trait Scheduler {
* Scala API
*/
final def scheduleOnce(delay: FiniteDuration)(f: ⇒ Unit)(
- implicit executor: ExecutionContext): Cancellable =
+ implicit
+ executor: ExecutionContext): Cancellable =
scheduleOnce(delay, new Runnable { override def run = f })
/**
@@ -128,7 +132,7 @@ trait Scheduler {
* Java & Scala API
*/
def scheduleOnce(
- delay: FiniteDuration,
+ delay: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
/**
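
The relocated `implicit` keyword does not change call sites, which normally supply the executor implicitly. A sketch assuming an ActorSystem `system` and an ActorRef `receiver`:

  import scala.concurrent.duration._
  import system.dispatcher // the implicit ExecutionContext

  system.scheduler.scheduleOnce(5.seconds, receiver, "tick")
  system.scheduler.schedule(0.seconds, 1.second, receiver, "tock")
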
diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
index 9ec9b2e0f3..6c30647dba 100644
--- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala
+++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala
@@ -523,11 +523,11 @@ object TypedProps {
@SerialVersionUID(1L)
final case class TypedProps[T <: AnyRef] protected[TypedProps] (
interfaces: immutable.Seq[Class[_]],
- creator: () ⇒ T,
- dispatcher: String = TypedProps.defaultDispatcherId,
- deploy: Deploy = Props.defaultDeploy,
- timeout: Option[Timeout] = TypedProps.defaultTimeout,
- loader: Option[ClassLoader] = TypedProps.defaultLoader) {
+ creator: () ⇒ T,
+ dispatcher: String = TypedProps.defaultDispatcherId,
+ deploy: Deploy = Props.defaultDeploy,
+ timeout: Option[Timeout] = TypedProps.defaultTimeout,
+ loader: Option[ClassLoader] = TypedProps.defaultLoader) {
/**
* Uses the supplied class as the factory for the TypedActor implementation,
@@ -536,7 +536,8 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] (
* appended in the sequence of interfaces.
*/
def this(implementation: Class[T]) =
- this(interfaces = TypedProps.extractInterfaces(implementation),
+ this(
+ interfaces = TypedProps.extractInterfaces(implementation),
creator = instantiator(implementation))
/**
@@ -546,7 +547,8 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] (
* appended in the sequence of interfaces.
*/
def this(interface: Class[_ >: T], implementation: Creator[T]) =
- this(interfaces = TypedProps.extractInterfaces(interface),
+ this(
+ interfaces = TypedProps.extractInterfaces(interface),
creator = implementation.create _)
/**
@@ -556,7 +558,8 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] (
* appended in the sequence of interfaces.
*/
def this(interface: Class[_ >: T], implementation: Class[T]) =
- this(interfaces = TypedProps.extractInterfaces(interface),
+ this(
+ interfaces = TypedProps.extractInterfaces(interface),
creator = instantiator(implementation))
/**
diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala
index 0906266843..6f186254d9 100644
--- a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala
+++ b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala
@@ -62,7 +62,8 @@ private[akka] trait Dispatch { this: ActorCell ⇒
if (req isInstance mbox.messageQueue) Create(None)
else {
val gotType = if (mbox.messageQueue == null) "null" else mbox.messageQueue.getClass.getName
- Create(Some(ActorInitializationException(self,
+ Create(Some(ActorInitializationException(
+ self,
s"Actor [$self] requires mailbox type [$req] got [$gotType]")))
}
case _ ⇒ Create(None)
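
For context, the mismatch error constructed above fires when an actor's declared mailbox requirement is not satisfied by the configured mailbox. A sketch of declaring such a requirement:

  import akka.actor.Actor
  import akka.dispatch.{ BoundedMessageQueueSemantics, RequiresMessageQueue }

  // creating this actor with a non-matching mailbox-type fails at startup
  class NeedsBounded extends Actor with RequiresMessageQueue[BoundedMessageQueueSemantics] {
    def receive = { case msg ⇒ println(msg) }
  }
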
diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
index b0fb0a2625..800e0cfcfa 100644
--- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
@@ -324,8 +324,8 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites:
case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites)
case fqcn ⇒
val args = List(
- classOf[Config] -> config,
- classOf[DispatcherPrerequisites] -> prerequisites)
+ classOf[Config] → config,
+ classOf[DispatcherPrerequisites] → prerequisites)
prerequisites.dynamicAccess.createInstanceFor[ExecutorServiceConfigurator](fqcn, args).recover({
case exception ⇒ throw new IllegalArgumentException(
("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s],
@@ -379,14 +379,16 @@ object ForkJoinExecutorConfigurator {
/**
* INTERNAL AKKA USAGE ONLY
*/
- final class AkkaForkJoinPool(parallelism: Int,
- threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
- unhandledExceptionHandler: Thread.UncaughtExceptionHandler,
- asyncMode: Boolean)
+ final class AkkaForkJoinPool(
+ parallelism: Int,
+ threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
+ unhandledExceptionHandler: Thread.UncaughtExceptionHandler,
+ asyncMode: Boolean)
extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, asyncMode) with LoadMetrics {
- def this(parallelism: Int,
- threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
- unhandledExceptionHandler: Thread.UncaughtExceptionHandler) = this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true)
+ def this(
+ parallelism: Int,
+ threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
+ unhandledExceptionHandler: Thread.UncaughtExceptionHandler) = this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true)
override def execute(r: Runnable): Unit =
if (r ne null)
@@ -427,9 +429,10 @@ class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrer
case x ⇒ throw new IllegalStateException("The prerequisites for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!")
}
- class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
- val parallelism: Int,
- val asyncMode: Boolean) extends ExecutorServiceFactory {
+ class ForkJoinExecutorServiceFactory(
+ val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
+ val parallelism: Int,
+ val asyncMode: Boolean) extends ExecutorServiceFactory {
def this(threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, parallelism: Int) = this(threadFactory, parallelism, asyncMode = true)
def createExecutorService: ExecutorService = new AkkaForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing, asyncMode)
}
diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala
index d79eed90fa..6568df3ade 100644
--- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala
@@ -30,14 +30,14 @@ import scala.concurrent.duration.FiniteDuration
*/
@deprecated("Use BalancingPool instead of BalancingDispatcher", "2.3")
class BalancingDispatcher(
- _configurator: MessageDispatcherConfigurator,
- _id: String,
- throughput: Int,
- throughputDeadlineTime: Duration,
- _mailboxType: MailboxType,
+ _configurator: MessageDispatcherConfigurator,
+ _id: String,
+ throughput: Int,
+ throughputDeadlineTime: Duration,
+ _mailboxType: MailboxType,
_executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
- _shutdownTimeout: FiniteDuration,
- attemptTeamWork: Boolean)
+ _shutdownTimeout: FiniteDuration,
+ attemptTeamWork: Boolean)
extends Dispatcher(_configurator, _id, throughput, throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
/**
diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
index c962535388..e533faa071 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala
@@ -26,12 +26,12 @@ import java.util.concurrent.atomic.AtomicReferenceFieldUpdater
* Larger values (or zero or negative) increase throughput, smaller values increase fairness
*/
class Dispatcher(
- _configurator: MessageDispatcherConfigurator,
- val id: String,
- val throughput: Int,
- val throughputDeadlineTime: Duration,
+ _configurator: MessageDispatcherConfigurator,
+ val id: String,
+ val throughput: Int,
+ val throughputDeadlineTime: Duration,
executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
- val shutdownTimeout: FiniteDuration)
+ val shutdownTimeout: FiniteDuration)
extends MessageDispatcher(_configurator) {
import configurator.prerequisites._
diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
index 78ffa96527..5ddaca44c7 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala
@@ -30,12 +30,12 @@ trait DispatcherPrerequisites {
* INTERNAL API
*/
private[akka] final case class DefaultDispatcherPrerequisites(
- val threadFactory: ThreadFactory,
- val eventStream: EventStream,
- val scheduler: Scheduler,
- val dynamicAccess: DynamicAccess,
- val settings: ActorSystem.Settings,
- val mailboxes: Mailboxes,
+ val threadFactory: ThreadFactory,
+ val eventStream: EventStream,
+ val scheduler: Scheduler,
+ val dynamicAccess: DynamicAccess,
+ val settings: ActorSystem.Settings,
+ val mailboxes: Mailboxes,
val defaultExecutionContext: Option[ExecutionContext]) extends DispatcherPrerequisites
object Dispatchers {
@@ -135,13 +135,13 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
def simpleName = id.substring(id.lastIndexOf('.') + 1)
idConfig(id)
.withFallback(appConfig)
- .withFallback(ConfigFactory.parseMap(Map("name" -> simpleName).asJava))
+ .withFallback(ConfigFactory.parseMap(Map("name" → simpleName).asJava))
.withFallback(defaultDispatcherConfig)
}
private def idConfig(id: String): Config = {
import scala.collection.JavaConverters._
- ConfigFactory.parseMap(Map("id" -> id).asJava)
+ ConfigFactory.parseMap(Map("id" → id).asJava)
}
/**
@@ -180,7 +180,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
classOf[BalancingDispatcherConfigurator].getName)
case "PinnedDispatcher" ⇒ new PinnedDispatcherConfigurator(cfg, prerequisites)
case fqn ⇒
- val args = List(classOf[Config] -> cfg, classOf[DispatcherPrerequisites] -> prerequisites)
+ val args = List(classOf[Config] → cfg, classOf[DispatcherPrerequisites] → prerequisites)
prerequisites.dynamicAccess.createInstanceFor[MessageDispatcherConfigurator](fqn, args).recover({
case exception ⇒
throw new ConfigurationException(
@@ -288,7 +288,8 @@ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrer
case e: ThreadPoolExecutorConfigurator ⇒ e.threadPoolConfig
case other ⇒
prerequisites.eventStream.publish(
- Warning("PinnedDispatcherConfigurator",
+ Warning(
+ "PinnedDispatcherConfigurator",
this.getClass,
"PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format(
config.getString("id"))))
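
These configurators are resolved when a dispatcher id is looked up or attached to Props. A sketch, assuming `my-dispatcher` is defined in the system configuration and `MyActor` is a hypothetical actor class:

  import akka.actor.Props

  val dispatcher = system.dispatchers.lookup("my-dispatcher")
  val props = Props[MyActor].withDispatcher("my-dispatcher")
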
diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala
index d25c8251af..9c9e9d277b 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Future.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala
@@ -9,7 +9,7 @@ import akka.japi.{ Function ⇒ JFunc, Option ⇒ JOption, Procedure }
import scala.concurrent.{ Future, Promise, ExecutionContext, ExecutionContextExecutor, ExecutionContextExecutorService }
import java.lang.{ Iterable ⇒ JIterable }
import java.util.{ LinkedList ⇒ JLinkedList }
-import java.util.concurrent.{ Executor, ExecutorService, Callable}
+import java.util.concurrent.{ Executor, ExecutorService, Callable }
import scala.util.{ Try, Success, Failure }
import java.util.concurrent.CompletionStage
import java.util.concurrent.CompletableFuture
diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala
index 0e53f4f120..cc834ce986 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala
@@ -54,7 +54,7 @@ private[akka] object Mailbox {
* INTERNAL API
*/
private[akka] abstract class Mailbox(val messageQueue: MessageQueue)
- extends ForkJoinTask[Unit] with SystemMessageQueue with Runnable {
+ extends ForkJoinTask[Unit] with SystemMessageQueue with Runnable {
import Mailbox._
@@ -248,7 +248,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue)
* Process the messages in the mailbox
*/
@tailrec private final def processMailbox(
- left: Int = java.lang.Math.max(dispatcher.throughput, 1),
+ left: Int = java.lang.Math.max(dispatcher.throughput, 1),
deadlineNs: Long = if (dispatcher.isThroughputDeadlineTimeDefined == true) System.nanoTime + dispatcher.throughputDeadlineTime.toNanos else 0L): Unit =
if (shouldProcessMessage) {
val next = dequeue()
@@ -391,7 +391,7 @@ class NodeMessageQueue extends AbstractNodeQueue[Envelope] with MessageQueue wit
* Discards overflowing messages into DeadLetters.
*/
class BoundedNodeMessageQueue(capacity: Int) extends AbstractBoundedNodeQueue[Envelope](capacity)
- with MessageQueue with BoundedMessageQueueSemantics with MultipleConsumerSemantics {
+ with MessageQueue with BoundedMessageQueueSemantics with MultipleConsumerSemantics {
final def pushTimeOut: Duration = Duration.Undefined
final def enqueue(receiver: ActorRef, handle: Envelope): Unit =
@@ -654,10 +654,11 @@ case class NonBlockingBoundedMailbox(val capacity: Int) extends MailboxType with
* BoundedMailbox is the default bounded MailboxType used by Akka Actors.
*/
final case class BoundedMailbox(val capacity: Int, override val pushTimeOut: FiniteDuration)
- extends MailboxType with ProducesMessageQueue[BoundedMailbox.MessageQueue]
- with ProducesPushTimeoutSemanticsMailbox {
+ extends MailboxType with ProducesMessageQueue[BoundedMailbox.MessageQueue]
+ with ProducesPushTimeoutSemanticsMailbox {
- def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
+ def this(settings: ActorSystem.Settings, config: Config) = this(
+ config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative")
@@ -669,7 +670,7 @@ final case class BoundedMailbox(val capacity: Int, override val pushTimeOut: Fin
object BoundedMailbox {
class MessageQueue(capacity: Int, final val pushTimeOut: FiniteDuration)
- extends LinkedBlockingQueue[Envelope](capacity) with BoundedQueueBasedMessageQueue {
+ extends LinkedBlockingQueue[Envelope](capacity) with BoundedQueueBasedMessageQueue {
final def queue: BlockingQueue[Envelope] = this
}
}
@@ -679,7 +680,7 @@ object BoundedMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class UnboundedPriorityMailbox(val cmp: Comparator[Envelope], val initialCapacity: Int)
- extends MailboxType with ProducesMessageQueue[UnboundedPriorityMailbox.MessageQueue] {
+ extends MailboxType with ProducesMessageQueue[UnboundedPriorityMailbox.MessageQueue] {
def this(cmp: Comparator[Envelope]) = this(cmp, 11)
final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
new UnboundedPriorityMailbox.MessageQueue(initialCapacity, cmp)
@@ -687,7 +688,7 @@ class UnboundedPriorityMailbox(val cmp: Comparator[Envelope], val initialCapacit
object UnboundedPriorityMailbox {
class MessageQueue(initialCapacity: Int, cmp: Comparator[Envelope])
- extends PriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
+ extends PriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
final def queue: Queue[Envelope] = this
}
}
@@ -697,8 +698,8 @@ object UnboundedPriorityMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, override final val pushTimeOut: Duration)
- extends MailboxType with ProducesMessageQueue[BoundedPriorityMailbox.MessageQueue]
- with ProducesPushTimeoutSemanticsMailbox {
+ extends MailboxType with ProducesMessageQueue[BoundedPriorityMailbox.MessageQueue]
+ with ProducesPushTimeoutSemanticsMailbox {
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative")
if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null")
@@ -709,8 +710,8 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap
object BoundedPriorityMailbox {
class MessageQueue(capacity: Int, cmp: Comparator[Envelope], val pushTimeOut: Duration)
- extends BoundedBlockingQueue[Envelope](capacity, new PriorityQueue[Envelope](11, cmp))
- with BoundedQueueBasedMessageQueue {
+ extends BoundedBlockingQueue[Envelope](capacity, new PriorityQueue[Envelope](11, cmp))
+ with BoundedQueueBasedMessageQueue {
final def queue: BlockingQueue[Envelope] = this
}
}
@@ -721,7 +722,7 @@ object BoundedPriorityMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class UnboundedStablePriorityMailbox(val cmp: Comparator[Envelope], val initialCapacity: Int)
- extends MailboxType with ProducesMessageQueue[UnboundedStablePriorityMailbox.MessageQueue] {
+ extends MailboxType with ProducesMessageQueue[UnboundedStablePriorityMailbox.MessageQueue] {
def this(cmp: Comparator[Envelope]) = this(cmp, 11)
final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
new UnboundedStablePriorityMailbox.MessageQueue(initialCapacity, cmp)
@@ -729,7 +730,7 @@ class UnboundedStablePriorityMailbox(val cmp: Comparator[Envelope], val initialC
object UnboundedStablePriorityMailbox {
class MessageQueue(initialCapacity: Int, cmp: Comparator[Envelope])
- extends StablePriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
+ extends StablePriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
final def queue: Queue[Envelope] = this
}
}
@@ -740,8 +741,8 @@ object UnboundedStablePriorityMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class BoundedStablePriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, override final val pushTimeOut: Duration)
- extends MailboxType with ProducesMessageQueue[BoundedStablePriorityMailbox.MessageQueue]
- with ProducesPushTimeoutSemanticsMailbox {
+ extends MailboxType with ProducesMessageQueue[BoundedStablePriorityMailbox.MessageQueue]
+ with ProducesPushTimeoutSemanticsMailbox {
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative")
if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null")
@@ -752,8 +753,8 @@ class BoundedStablePriorityMailbox( final val cmp: Comparator[Envelope], final v
object BoundedStablePriorityMailbox {
class MessageQueue(capacity: Int, cmp: Comparator[Envelope], val pushTimeOut: Duration)
- extends BoundedBlockingQueue[Envelope](capacity, new StablePriorityQueue[Envelope](11, cmp))
- with BoundedQueueBasedMessageQueue {
+ extends BoundedBlockingQueue[Envelope](capacity, new StablePriorityQueue[Envelope](11, cmp))
+ with BoundedQueueBasedMessageQueue {
final def queue: BlockingQueue[Envelope] = this
}
}
@@ -779,10 +780,11 @@ object UnboundedDequeBasedMailbox {
 * BoundedDequeBasedMailbox is a bounded MailboxType, backed by a Deque.
*/
case class BoundedDequeBasedMailbox( final val capacity: Int, override final val pushTimeOut: FiniteDuration)
- extends MailboxType with ProducesMessageQueue[BoundedDequeBasedMailbox.MessageQueue]
- with ProducesPushTimeoutSemanticsMailbox {
+ extends MailboxType with ProducesMessageQueue[BoundedDequeBasedMailbox.MessageQueue]
+ with ProducesPushTimeoutSemanticsMailbox {
- def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
+ def this(settings: ActorSystem.Settings, config: Config) = this(
+ config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedDequeBasedMailbox can not be negative")
@@ -794,7 +796,7 @@ case class BoundedDequeBasedMailbox( final val capacity: Int, override final val
object BoundedDequeBasedMailbox {
class MessageQueue(capacity: Int, val pushTimeOut: FiniteDuration)
- extends LinkedBlockingDeque[Envelope](capacity) with BoundedDequeBasedMessageQueue {
+ extends LinkedBlockingDeque[Envelope](capacity) with BoundedDequeBasedMessageQueue {
final val queue = this
}
}
@@ -856,9 +858,10 @@ object UnboundedControlAwareMailbox {
* to allow messages that extend [[akka.dispatch.ControlMessage]] to be delivered with priority.
*/
final case class BoundedControlAwareMailbox(capacity: Int, override final val pushTimeOut: FiniteDuration) extends MailboxType
- with ProducesMessageQueue[BoundedControlAwareMailbox.MessageQueue]
- with ProducesPushTimeoutSemanticsMailbox {
- def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
+ with ProducesMessageQueue[BoundedControlAwareMailbox.MessageQueue]
+ with ProducesPushTimeoutSemanticsMailbox {
+ def this(settings: ActorSystem.Settings, config: Config) = this(
+ config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new BoundedControlAwareMailbox.MessageQueue(capacity, pushTimeOut)
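
The `(settings, config)` constructors above are invoked reflectively from mailbox configuration, and the keys they read match the `getInt`/`getNanosDuration` calls. A sketch of such a block, parsed from a string here, with `my-bounded-mailbox` as a hypothetical id:

  import com.typesafe.config.ConfigFactory

  val cfg = ConfigFactory.parseString("""
    my-bounded-mailbox {
      mailbox-type = "akka.dispatch.BoundedMailbox"
      mailbox-capacity = 1000
      mailbox-push-timeout-time = 10ms
    }
  """)
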
diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala
index 1fb40b643a..9529c7c35c 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala
@@ -23,10 +23,10 @@ object Mailboxes {
}
private[akka] class Mailboxes(
- val settings: ActorSystem.Settings,
+ val settings: ActorSystem.Settings,
val eventStream: EventStream,
- dynamicAccess: DynamicAccess,
- deadLetters: ActorRef) {
+ dynamicAccess: DynamicAccess,
+ deadLetters: ActorRef) {
import Mailboxes._
@@ -187,7 +187,7 @@ private[akka] class Mailboxes(
val mailboxType = conf.getString("mailbox-type") match {
case "" ⇒ throw new ConfigurationException(s"The setting mailbox-type, defined in [$id] is empty")
case fqcn ⇒
- val args = List(classOf[ActorSystem.Settings] -> settings, classOf[Config] -> conf)
+ val args = List(classOf[ActorSystem.Settings] → settings, classOf[Config] → conf)
dynamicAccess.createInstanceFor[MailboxType](fqcn, args).recover({
case exception ⇒
throw new IllegalArgumentException(
@@ -228,7 +228,7 @@ private[akka] class Mailboxes(
//INTERNAL API
private def config(id: String): Config = {
import scala.collection.JavaConverters._
- ConfigFactory.parseMap(Map("id" -> id).asJava)
+ ConfigFactory.parseMap(Map("id" → id).asJava)
.withFallback(settings.config.getConfig(id))
.withFallback(defaultMailboxConfig)
}
diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala
index 3eba061d19..12c1d58a76 100644
--- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala
@@ -15,12 +15,13 @@ import scala.concurrent.duration.FiniteDuration
* the `lookup` method in [[akka.dispatch.Dispatchers]].
*/
class PinnedDispatcher(
- _configurator: MessageDispatcherConfigurator,
- _actor: ActorCell,
- _id: String,
- _shutdownTimeout: FiniteDuration,
+ _configurator: MessageDispatcherConfigurator,
+ _actor: ActorCell,
+ _id: String,
+ _shutdownTimeout: FiniteDuration,
_threadPoolConfig: ThreadPoolConfig)
- extends Dispatcher(_configurator,
+ extends Dispatcher(
+ _configurator,
_id,
Int.MaxValue,
Duration.Zero,
diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
index ce88a4340d..cb9ed25f73 100644
--- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala
@@ -65,12 +65,13 @@ trait ExecutorServiceFactoryProvider {
/**
* A small configuration DSL to create ThreadPoolExecutors that can be provided as an ExecutorServiceFactoryProvider to Dispatcher
*/
-final case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout,
- corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize,
- maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize,
- threadTimeout: Duration = ThreadPoolConfig.defaultTimeout,
- queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(),
- rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy)
+final case class ThreadPoolConfig(
+ allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout,
+ corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize,
+ maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize,
+ threadTimeout: Duration = ThreadPoolConfig.defaultTimeout,
+ queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(),
+ rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy)
extends ExecutorServiceFactoryProvider {
class ThreadPoolExecutorServiceFactory(val threadFactory: ThreadFactory) extends ExecutorServiceFactory {
def createExecutorService: ExecutorService = {
@@ -173,11 +174,12 @@ object MonitorableThreadFactory {
}
}
-final case class MonitorableThreadFactory(name: String,
- daemonic: Boolean,
- contextClassLoader: Option[ClassLoader],
- exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing,
- protected val counter: AtomicLong = new AtomicLong)
+final case class MonitorableThreadFactory(
+ name: String,
+ daemonic: Boolean,
+ contextClassLoader: Option[ClassLoader],
+ exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing,
+ protected val counter: AtomicLong = new AtomicLong)
extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory {
def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
diff --git a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
index 5dc30913f3..9c0da4a6d2 100644
--- a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala
@@ -261,6 +261,6 @@ private[akka] final case class Failed(child: ActorRef, cause: Throwable, uid: In
@SerialVersionUID(1L)
private[akka] final case class DeathWatchNotification(
- actor: ActorRef,
+ actor: ActorRef,
existenceConfirmed: Boolean,
- addressTerminated: Boolean) extends SystemMessage with DeadLetterSuppression
+ addressTerminated: Boolean) extends SystemMessage with DeadLetterSuppression
diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala
index c602bd2ce9..b2dbae6ea3 100644
--- a/akka-actor/src/main/scala/akka/event/Logging.scala
+++ b/akka-actor/src/main/scala/akka/event/Logging.scala
@@ -572,9 +572,9 @@ object Logging {
}
/**
- * Obtain LoggingAdapter with MDC support for the given actor.
- * Don't use it outside its specific Actor as it isn't thread safe
- */
+ * Obtain LoggingAdapter with MDC support for the given actor.
+ * Don't use it outside its specific Actor as it isn't thread safe
+ */
def getLogger(logSource: Actor): DiagnosticLoggingAdapter = apply(logSource)
/**
diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala
index 6ee9aab3ca..9a342d5b4f 100644
--- a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala
+++ b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala
@@ -58,7 +58,7 @@ object SimpleDnsCache {
new Cache(
queue + new ExpiryEntry(answer.name, until),
- cache + (answer.name -> CacheEntry(answer, until)),
+ cache + (answer.name → CacheEntry(answer, until)),
clock)
}
diff --git a/akka-actor/src/main/scala/akka/io/Tcp.scala b/akka-actor/src/main/scala/akka/io/Tcp.scala
index d8f42f721e..37bae8e5ce 100644
--- a/akka-actor/src/main/scala/akka/io/Tcp.scala
+++ b/akka-actor/src/main/scala/akka/io/Tcp.scala
@@ -110,11 +110,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
* @param localAddress optionally specifies a specific address to bind to
* @param options Please refer to the `Tcp.SO` object for a list of all supported options.
*/
- final case class Connect(remoteAddress: InetSocketAddress,
- localAddress: Option[InetSocketAddress] = None,
- options: immutable.Traversable[SocketOption] = Nil,
- timeout: Option[FiniteDuration] = None,
- pullMode: Boolean = false) extends Command
+ final case class Connect(
+ remoteAddress: InetSocketAddress,
+ localAddress: Option[InetSocketAddress] = None,
+ options: immutable.Traversable[SocketOption] = Nil,
+ timeout: Option[FiniteDuration] = None,
+ pullMode: Boolean = false) extends Command
/**
 * The Bind message is sent to the TCP manager actor, which is obtained via
@@ -135,11 +136,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
*
* @param options Please refer to the `Tcp.SO` object for a list of all supported options.
*/
- final case class Bind(handler: ActorRef,
- localAddress: InetSocketAddress,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- pullMode: Boolean = false) extends Command
+ final case class Bind(
+ handler: ActorRef,
+ localAddress: InetSocketAddress,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ pullMode: Boolean = false) extends Command
/**
* This message must be sent to a TCP connection actor after receiving the
@@ -624,11 +626,12 @@ object TcpMessage {
 * @param timeout is the desired connection timeout; `null` means "no timeout"
* @param pullMode enables pull based reading from the connection
*/
- def connect(remoteAddress: InetSocketAddress,
- localAddress: InetSocketAddress,
- options: JIterable[SocketOption],
- timeout: FiniteDuration,
- pullMode: Boolean): Command = Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode)
+ def connect(
+ remoteAddress: InetSocketAddress,
+ localAddress: InetSocketAddress,
+ options: JIterable[SocketOption],
+ timeout: FiniteDuration,
+ pullMode: Boolean): Command = Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode)
/**
* Connect to the given `remoteAddress` without binding to a local address and without
@@ -658,17 +661,19 @@ object TcpMessage {
 * @param pullMode enables pull based accepting of connections and pull
 * based reading from the accepted connections.
*/
- def bind(handler: ActorRef,
- endpoint: InetSocketAddress,
- backlog: Int,
- options: JIterable[SocketOption],
- pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode)
+ def bind(
+ handler: ActorRef,
+ endpoint: InetSocketAddress,
+ backlog: Int,
+ options: JIterable[SocketOption],
+ pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode)
/**
* Open a listening socket without specifying options.
*/
- def bind(handler: ActorRef,
- endpoint: InetSocketAddress,
- backlog: Int): Command = Bind(handler, endpoint, backlog, Nil)
+ def bind(
+ handler: ActorRef,
+ endpoint: InetSocketAddress,
+ backlog: Int): Command = Bind(handler, endpoint, backlog, Nil)
/**
* This message must be sent to a TCP connection actor after receiving the
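
For context, the reformatted `Connect` and `Bind` commands are sent to the TCP manager obtained through the IO extension. A minimal sketch from inside an actor, with hypothetical addresses:

  import java.net.InetSocketAddress
  import akka.io.{ IO, Tcp }

  IO(Tcp)(context.system) ! Tcp.Connect(new InetSocketAddress("example.com", 8080))
  IO(Tcp)(context.system) ! Tcp.Bind(self, new InetSocketAddress("0.0.0.0", 9090))
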
diff --git a/akka-actor/src/main/scala/akka/io/TcpConnection.scala b/akka-actor/src/main/scala/akka/io/TcpConnection.scala
index 6cd9d60280..d97be3d1ff 100644
--- a/akka-actor/src/main/scala/akka/io/TcpConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpConnection.scala
@@ -388,9 +388,9 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
class PendingBufferWrite(
val commander: ActorRef,
remainingData: ByteString,
- ack: Any,
- buffer: ByteBuffer,
- tail: WriteCommand) extends PendingWrite {
+ ack: Any,
+ buffer: ByteBuffer,
+ tail: WriteCommand) extends PendingWrite {
def doWrite(info: ConnectionInfo): PendingWrite = {
@tailrec def writeToChannel(data: ByteString): PendingWrite = {
@@ -429,11 +429,11 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
class PendingWriteFile(
val commander: ActorRef,
- fileChannel: FileChannel,
- offset: Long,
- remaining: Long,
- ack: Event,
- tail: WriteCommand) extends PendingWrite with Runnable {
+ fileChannel: FileChannel,
+ offset: Long,
+ remaining: Long,
+ ack: Event,
+ tail: WriteCommand) extends PendingWrite with Runnable {
def doWrite(info: ConnectionInfo): PendingWrite = {
tcp.fileIoDispatcher.execute(this)
@@ -479,10 +479,11 @@ private[io] object TcpConnection {
/**
* Groups required connection-related data that are only available once the connection has been fully established.
*/
- final case class ConnectionInfo(registration: ChannelRegistration,
- handler: ActorRef,
- keepOpenOnPeerClosed: Boolean,
- useResumeWriting: Boolean)
+ final case class ConnectionInfo(
+ registration: ChannelRegistration,
+ handler: ActorRef,
+ keepOpenOnPeerClosed: Boolean,
+ useResumeWriting: Boolean)
// INTERNAL MESSAGES
diff --git a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala
index 689a5b8e62..f7efe2bbf1 100644
--- a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala
@@ -15,12 +15,13 @@ import akka.io.Inet.SocketOption
*
* INTERNAL API
*/
-private[io] class TcpIncomingConnection(_tcp: TcpExt,
- _channel: SocketChannel,
- registry: ChannelRegistry,
- bindHandler: ActorRef,
- options: immutable.Traversable[SocketOption],
- readThrottling: Boolean)
+private[io] class TcpIncomingConnection(
+ _tcp: TcpExt,
+ _channel: SocketChannel,
+ registry: ChannelRegistry,
+ bindHandler: ActorRef,
+ options: immutable.Traversable[SocketOption],
+ readThrottling: Boolean)
extends TcpConnection(_tcp, _channel, readThrottling) {
signDeathPact(bindHandler)
diff --git a/akka-actor/src/main/scala/akka/io/TcpListener.scala b/akka-actor/src/main/scala/akka/io/TcpListener.scala
index ddf4c9bc82..0f5ca05e16 100644
--- a/akka-actor/src/main/scala/akka/io/TcpListener.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpListener.scala
@@ -31,11 +31,12 @@ private[io] object TcpListener {
/**
* INTERNAL API
*/
-private[io] class TcpListener(selectorRouter: ActorRef,
- tcp: TcpExt,
- channelRegistry: ChannelRegistry,
- bindCommander: ActorRef,
- bind: Bind)
+private[io] class TcpListener(
+ selectorRouter: ActorRef,
+ tcp: TcpExt,
+ channelRegistry: ChannelRegistry,
+ bindCommander: ActorRef,
+ bind: Bind)
extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
import TcpListener._
diff --git a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala
index e6fce9c7f0..05f64eb35b 100644
--- a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala
@@ -19,10 +19,11 @@ import akka.io.Tcp._
*
* INTERNAL API
*/
-private[io] class TcpOutgoingConnection(_tcp: TcpExt,
- channelRegistry: ChannelRegistry,
- commander: ActorRef,
- connect: Connect)
+private[io] class TcpOutgoingConnection(
+ _tcp: TcpExt,
+ channelRegistry: ChannelRegistry,
+ commander: ActorRef,
+ connect: Connect)
extends TcpConnection(_tcp, SocketChannel.open().configureBlocking(false).asInstanceOf[SocketChannel], connect.pullMode) {
import context._
diff --git a/akka-actor/src/main/scala/akka/io/Udp.scala b/akka-actor/src/main/scala/akka/io/Udp.scala
index 035cd315ff..8bc3e425b0 100644
--- a/akka-actor/src/main/scala/akka/io/Udp.scala
+++ b/akka-actor/src/main/scala/akka/io/Udp.scala
@@ -92,9 +92,10 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider {
* The listener actor for the newly bound port will reply with a [[Bound]]
* message, or the manager will reply with a [[CommandFailed]] message.
*/
- final case class Bind(handler: ActorRef,
- localAddress: InetSocketAddress,
- options: immutable.Traversable[SocketOption] = Nil) extends Command
+ final case class Bind(
+ handler: ActorRef,
+ localAddress: InetSocketAddress,
+ options: immutable.Traversable[SocketOption] = Nil) extends Command
/**
* Send this message to the listener actor that previously sent a [[Bound]]
diff --git a/akka-actor/src/main/scala/akka/io/UdpConnected.scala b/akka-actor/src/main/scala/akka/io/UdpConnected.scala
index 1d9a39c3e5..6c611abb1b 100644
--- a/akka-actor/src/main/scala/akka/io/UdpConnected.scala
+++ b/akka-actor/src/main/scala/akka/io/UdpConnected.scala
@@ -84,10 +84,11 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide
* which is restricted to sending to and receiving from the given `remoteAddress`.
* All received datagrams will be sent to the designated `handler` actor.
*/
- final case class Connect(handler: ActorRef,
- remoteAddress: InetSocketAddress,
- localAddress: Option[InetSocketAddress] = None,
- options: immutable.Traversable[SocketOption] = Nil) extends Command
+ final case class Connect(
+ handler: ActorRef,
+ remoteAddress: InetSocketAddress,
+ localAddress: Option[InetSocketAddress] = None,
+ options: immutable.Traversable[SocketOption] = Nil) extends Command
/**
* Send this message to a connection actor (which had previously sent the
@@ -176,21 +177,24 @@ object UdpConnectedMessage {
* which is restricted to sending to and receiving from the given `remoteAddress`.
* All received datagrams will be sent to the designated `handler` actor.
*/
- def connect(handler: ActorRef,
- remoteAddress: InetSocketAddress,
- localAddress: InetSocketAddress,
- options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options)
+ def connect(
+ handler: ActorRef,
+ remoteAddress: InetSocketAddress,
+ localAddress: InetSocketAddress,
+ options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options)
/**
* Connect without specifying the `localAddress`.
*/
- def connect(handler: ActorRef,
- remoteAddress: InetSocketAddress,
- options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options)
+ def connect(
+ handler: ActorRef,
+ remoteAddress: InetSocketAddress,
+ options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options)
/**
* Connect without specifying the `localAddress` or `options`.
*/
- def connect(handler: ActorRef,
- remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil)
+ def connect(
+ handler: ActorRef,
+ remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil)
/**
* This message is understood by the connection actors to send data to their
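
All of the `connect` overloads above build the same `Connect` message for the UdpConnected manager. A minimal sketch from inside an actor, with a hypothetical peer address:

  import java.net.InetSocketAddress
  import akka.io.{ IO, UdpConnected }

  IO(UdpConnected)(context.system) !
    UdpConnected.Connect(self, new InetSocketAddress("10.0.0.1", 4000))
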
diff --git a/akka-actor/src/main/scala/akka/io/UdpConnection.scala b/akka-actor/src/main/scala/akka/io/UdpConnection.scala
index 0f3051d128..e391f27590 100644
--- a/akka-actor/src/main/scala/akka/io/UdpConnection.scala
+++ b/akka-actor/src/main/scala/akka/io/UdpConnection.scala
@@ -18,10 +18,11 @@ import akka.io.UdpConnected._
/**
* INTERNAL API
*/
-private[io] class UdpConnection(udpConn: UdpConnectedExt,
- channelRegistry: ChannelRegistry,
- commander: ActorRef,
- connect: Connect)
+private[io] class UdpConnection(
+ udpConn: UdpConnectedExt,
+ channelRegistry: ChannelRegistry,
+ commander: ActorRef,
+ connect: Connect)
extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
import connect._
@@ -153,7 +154,8 @@ private[io] class UdpConnection(udpConn: UdpConnectedExt,
thunk
} catch {
case NonFatal(e) ⇒
- log.debug("Failure while connecting UDP channel to remote address [{}] local address [{}]: {}",
+ log.debug(
+ "Failure while connecting UDP channel to remote address [{}] local address [{}]: {}",
remoteAddress, localAddress.getOrElse("undefined"), e)
commander ! CommandFailed(connect)
context.stop(self)
diff --git a/akka-actor/src/main/scala/akka/io/UdpListener.scala b/akka-actor/src/main/scala/akka/io/UdpListener.scala
index 96708869fc..a071b9d6c8 100644
--- a/akka-actor/src/main/scala/akka/io/UdpListener.scala
+++ b/akka-actor/src/main/scala/akka/io/UdpListener.scala
@@ -19,10 +19,11 @@ import akka.io.Udp._
/**
* INTERNAL API
*/
-private[io] class UdpListener(val udp: UdpExt,
- channelRegistry: ChannelRegistry,
- bindCommander: ActorRef,
- bind: Bind)
+private[io] class UdpListener(
+ val udp: UdpExt,
+ channelRegistry: ChannelRegistry,
+ bindCommander: ActorRef,
+ bind: Bind)
extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
import udp.bufferPool
diff --git a/akka-actor/src/main/scala/akka/io/UdpSender.scala b/akka-actor/src/main/scala/akka/io/UdpSender.scala
index a52bfc4f2e..c96e43b6d5 100644
--- a/akka-actor/src/main/scala/akka/io/UdpSender.scala
+++ b/akka-actor/src/main/scala/akka/io/UdpSender.scala
@@ -14,10 +14,11 @@ import akka.actor._
/**
* INTERNAL API
*/
-private[io] class UdpSender(val udp: UdpExt,
- channelRegistry: ChannelRegistry,
- commander: ActorRef,
- options: immutable.Traversable[SocketOption])
+private[io] class UdpSender(
+ val udp: UdpExt,
+ channelRegistry: ChannelRegistry,
+ commander: ActorRef,
+ options: immutable.Traversable[SocketOption])
extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
val channel = {
diff --git a/akka-actor/src/main/scala/akka/io/WithUdpSend.scala b/akka-actor/src/main/scala/akka/io/WithUdpSend.scala
index 5759650c8a..7063e8b049 100644
--- a/akka-actor/src/main/scala/akka/io/WithUdpSend.scala
+++ b/akka-actor/src/main/scala/akka/io/WithUdpSend.scala
@@ -51,7 +51,8 @@ private[io] trait WithUdpSend {
} catch {
case NonFatal(e) ⇒
sender() ! CommandFailed(send)
- log.debug("Failure while sending UDP datagram to remote address [{}]: {}",
+ log.debug(
+ "Failure while sending UDP datagram to remote address [{}]: {}",
send.target, e)
retriedSend = false
pendingSend = null
diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala
index f575148609..8541459d84 100644
--- a/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala
+++ b/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala
@@ -16,12 +16,12 @@ import akka.actor.SupervisorStrategy._
*/
private class BackoffOnRestartSupervisor(
val childProps: Props,
- val childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- val reset: BackoffReset,
- randomFactor: Double,
- strategy: OneForOneStrategy)
+ val childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ val reset: BackoffReset,
+ randomFactor: Double,
+ strategy: OneForOneStrategy)
extends Actor with HandleBackoff
with ActorLogging {
diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala
index 36a5dca998..d7af6f7ea7 100644
--- a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala
+++ b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala
@@ -70,10 +70,10 @@ object Backoff {
* In order to skip this additional delay pass in `0`.
*/
def onFailure(
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
randomFactor: Double): BackoffOptions =
BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
@@ -131,10 +131,10 @@ object Backoff {
* In order to skip this additional delay pass in `0`.
*/
def onStop(
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
randomFactor: Double): BackoffOptions =
BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
}
@@ -183,14 +183,14 @@ trait BackoffOptions {
}
private final case class BackoffOptionsImpl(
- backoffType: BackoffType = RestartImpliesFailure,
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
- reset: Option[BackoffReset] = None,
- supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider)) extends BackoffOptions {
+ backoffType: BackoffType = RestartImpliesFailure,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
+ reset: Option[BackoffReset] = None,
+ supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider)) extends BackoffOptions {
val backoffReset = reset.getOrElse(AutoReset(minBackoff))
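
(Aside: this patch only re-indents the `Backoff.onFailure` / `Backoff.onStop` builders. A short usage sketch may help place them; `childProps` and the child name are hypothetical, and `BackoffSupervisor.props(options)` is the companion overload consuming a `BackoffOptions`.)

```scala
import akka.pattern.{ Backoff, BackoffSupervisor }
import scala.concurrent.duration._

// Hedged sketch: recreate the child after it stops, with an exponentially
// growing, randomly jittered delay (3s, 6s, 12s, ... capped at 30s).
val supervisorProps = BackoffSupervisor.props(
  Backoff.onStop(
    childProps,              // hypothetical Props of the supervised child
    childName = "myEcho",
    minBackoff = 3.seconds,
    maxBackoff = 30.seconds,
    randomFactor = 0.2))     // adds up to 20% random noise to each delay
```
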
diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala
index aa19dd0d1f..7f09d07929 100644
--- a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala
+++ b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala
@@ -37,10 +37,10 @@ object BackoffSupervisor {
* In order to skip this additional delay pass in `0`.
*/
def props(
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
randomFactor: Double): Props = {
propsWithSupervisorStrategy(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy)
}
@@ -66,12 +66,12 @@ object BackoffSupervisor {
* in the child
*/
def propsWithSupervisorStrategy(
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
randomFactor: Double,
- strategy: SupervisorStrategy): Props = {
+ strategy: SupervisorStrategy): Props = {
require(minBackoff > Duration.Zero, "minBackoff must be > 0")
require(maxBackoff >= minBackoff, "maxBackoff must be >= minBackoff")
require(0.0 <= randomFactor && randomFactor <= 1.0, "randomFactor must be between 0.0 and 1.0")
@@ -145,8 +145,8 @@ object BackoffSupervisor {
*/
private[akka] def calculateDelay(
restartCount: Int,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
randomFactor: Double): FiniteDuration = {
val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor
if (restartCount >= 30) // Duration overflow protection (> 100 years)
@@ -166,12 +166,12 @@ object BackoffSupervisor {
*/
final class BackoffSupervisor(
val childProps: Props,
- val childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- val reset: BackoffReset,
- randomFactor: Double,
- strategy: SupervisorStrategy)
+ val childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ val reset: BackoffReset,
+ randomFactor: Double,
+ strategy: SupervisorStrategy)
extends Actor with HandleBackoff {
import BackoffSupervisor._
@@ -192,20 +192,20 @@ final class BackoffSupervisor(
// for binary compatibility with 2.4.1
def this(
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
- randomFactor: Double,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
+ randomFactor: Double,
supervisorStrategy: SupervisorStrategy) =
this(childProps, childName, minBackoff, maxBackoff, AutoReset(minBackoff), randomFactor, supervisorStrategy)
// for binary compatibility with 2.4.0
def this(
- childProps: Props,
- childName: String,
- minBackoff: FiniteDuration,
- maxBackoff: FiniteDuration,
+ childProps: Props,
+ childName: String,
+ minBackoff: FiniteDuration,
+ maxBackoff: FiniteDuration,
randomFactor: Double) =
this(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy)
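
(Aside: the `calculateDelay` hunk above touches only parameter alignment; its body is elided. Reconstructed here as a hedged sketch from the visible `rnd` and overflow-protection lines, assuming the usual jittered exponential-backoff formula rather than quoting the elided code.)

```scala
import java.util.concurrent.ThreadLocalRandom
import scala.concurrent.duration._

// Sketch: min(maxBackoff, minBackoff * 2^restartCount), scaled by a random
// factor in [1.0, 1.0 + randomFactor]; restartCount >= 30 short-circuits to
// maxBackoff to avoid Duration overflow (> 100 years).
def calculateDelay(
  restartCount: Int,
  minBackoff: FiniteDuration,
  maxBackoff: FiniteDuration,
  randomFactor: Double): FiniteDuration = {
  val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor
  if (restartCount >= 30) maxBackoff
  else
    maxBackoff.min(minBackoff * math.pow(2, restartCount)) * rnd match {
      case f: FiniteDuration ⇒ f
      case _                 ⇒ maxBackoff
    }
}
```
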
diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala
index 2d83675bc9..9e0a52c2ce 100644
--- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala
+++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala
@@ -515,5 +515,5 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
*/
class CircuitBreakerOpenException(
val remainingDuration: FiniteDuration,
- message: String = "Circuit Breaker is open; calls are failing fast")
+ message: String = "Circuit Breaker is open; calls are failing fast")
extends AkkaException(message) with NoStackTrace
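
(Aside: for context on the `CircuitBreakerOpenException` being re-indented above, callers see it when invoking a breaker that has tripped. A minimal sketch, assuming an `ActorSystem` named `system` is in scope and `risky()` is a hypothetical protected call.)

```scala
import akka.pattern.CircuitBreaker
import scala.concurrent.Future
import scala.concurrent.duration._
import system.dispatcher // implicit ExecutionContext for the breaker

// Trips open after 5 consecutive failures; while open, calls fail fast
// with CircuitBreakerOpenException instead of running the body.
val breaker = new CircuitBreaker(
  system.scheduler,
  maxFailures = 5,
  callTimeout = 10.seconds,
  resetTimeout = 30.seconds)

def risky(): String = ??? // hypothetical guarded operation
val guarded: Future[String] = breaker.withCircuitBreaker(Future(risky()))
```
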
diff --git a/akka-actor/src/main/scala/akka/routing/Balancing.scala b/akka-actor/src/main/scala/akka/routing/Balancing.scala
index e7210b7039..3660b28aa3 100644
--- a/akka-actor/src/main/scala/akka/routing/Balancing.scala
+++ b/akka-actor/src/main/scala/akka/routing/Balancing.scala
@@ -66,9 +66,9 @@ private[akka] final class BalancingRoutingLogic extends RoutingLogic {
*/
@SerialVersionUID(1L)
final case class BalancingPool(
- override val nrOfInstances: Int,
+ override val nrOfInstances: Int,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Pool {
def this(config: Config) =
@@ -112,12 +112,14 @@ final case class BalancingPool(
// dispatcher of this pool
val deployDispatcherConfigPath = s"akka.actor.deployment.$deployPath.pool-dispatcher"
val systemConfig = context.system.settings.config
- val dispatcherConfig = context.system.dispatchers.config(dispatcherId,
+ val dispatcherConfig = context.system.dispatchers.config(
+ dispatcherId,
// use the user defined 'pool-dispatcher' config as fallback, if any
if (systemConfig.hasPath(deployDispatcherConfigPath)) systemConfig.getConfig(deployDispatcherConfigPath)
else ConfigFactory.empty)
- dispatchers.registerConfigurator(dispatcherId, new BalancingDispatcherConfigurator(dispatcherConfig,
+ dispatchers.registerConfigurator(dispatcherId, new BalancingDispatcherConfigurator(
+ dispatcherConfig,
dispatchers.prerequisites))
}
diff --git a/akka-actor/src/main/scala/akka/routing/Broadcast.scala b/akka-actor/src/main/scala/akka/routing/Broadcast.scala
index fbc54e4af2..b71f3153bf 100644
--- a/akka-actor/src/main/scala/akka/routing/Broadcast.scala
+++ b/akka-actor/src/main/scala/akka/routing/Broadcast.scala
@@ -58,8 +58,8 @@ final class BroadcastRoutingLogic extends RoutingLogic {
final case class BroadcastPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[BroadcastPool] {
def this(config: Config) =
@@ -118,8 +118,8 @@ final case class BroadcastPool(
*/
@SerialVersionUID(1L)
final case class BroadcastGroup(
- override val paths: immutable.Iterable[String],
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ override val paths: immutable.Iterable[String],
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
index 69671a134a..15b5101e7b 100644
--- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
+++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
@@ -39,7 +39,8 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
*/
def :+(node: T): ConsistentHash[T] = {
val nodeHash = hashFor(node.toString)
- new ConsistentHash(nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) -> node) }),
+ new ConsistentHash(
+ nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) → node) }),
virtualNodesFactor)
}
@@ -57,7 +58,8 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
*/
def :-(node: T): ConsistentHash[T] = {
val nodeHash = hashFor(node.toString)
- new ConsistentHash(nodes -- ((1 to virtualNodesFactor) map { r ⇒ concatenateNodeHash(nodeHash, r) }),
+ new ConsistentHash(
+ nodes -- ((1 to virtualNodesFactor) map { r ⇒ concatenateNodeHash(nodeHash, r) }),
virtualNodesFactor)
}
@@ -110,12 +112,13 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
object ConsistentHash {
def apply[T: ClassTag](nodes: Iterable[T], virtualNodesFactor: Int): ConsistentHash[T] = {
- new ConsistentHash(immutable.SortedMap.empty[Int, T] ++
- (for {
- node ← nodes
- nodeHash = hashFor(node.toString)
- vnode ← 1 to virtualNodesFactor
- } yield (concatenateNodeHash(nodeHash, vnode) -> node)),
+ new ConsistentHash(
+ immutable.SortedMap.empty[Int, T] ++
+ (for {
+ node ← nodes
+ nodeHash = hashFor(node.toString)
+ vnode ← 1 to virtualNodesFactor
+ } yield (concatenateNodeHash(nodeHash, vnode) → node)),
virtualNodesFactor)
}
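
(Aside: the `:+`, `:-`, and `apply` hunks above all build the same structure: each node hashed `virtualNodesFactor` times onto a sorted ring. A small usage sketch with hypothetical node names.)

```scala
import akka.routing.ConsistentHash

// Each node occupies 10 virtual points on the ring, evening out the
// key distribution across a small node set.
val ring = ConsistentHash(List("nodeA", "nodeB", "nodeC"), virtualNodesFactor = 10)

val owner  = ring.nodeFor("user-42")  // the same key always maps to the same node
val grown  = ring :+ "nodeD"          // adding a node remaps only ~1/4 of keys
val shrunk = ring :- "nodeB"          // removing one remaps only that node's keys
```
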
diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
index 34cbb549ab..78dd4ff505 100644
--- a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
+++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
@@ -135,9 +135,9 @@ object ConsistentHashingRoutingLogic {
*/
@SerialVersionUID(1L)
final case class ConsistentHashingRoutingLogic(
- system: ActorSystem,
- virtualNodesFactor: Int = 0,
- hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
+ system: ActorSystem,
+ virtualNodesFactor: Int = 0,
+ hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
extends RoutingLogic {
import ConsistentHashingRouter._
@@ -219,7 +219,8 @@ final case class ConsistentHashingRoutingLogic(
case _ if hashMapping.isDefinedAt(message) ⇒ target(hashMapping(message))
case hashable: ConsistentHashable ⇒ target(hashable.consistentHashKey)
case other ⇒
- log.warning("Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
+ log.warning(
+ "Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
message.getClass.getName, classOf[ConsistentHashable].getName,
classOf[ConsistentHashableEnvelope].getName)
NoRoutee
@@ -266,13 +267,13 @@ final case class ConsistentHashingRoutingLogic(
*/
@SerialVersionUID(1L)
final case class ConsistentHashingPool(
- override val nrOfInstances: Int,
- override val resizer: Option[Resizer] = None,
- val virtualNodesFactor: Int = 0,
- val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
- override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val nrOfInstances: Int,
+ override val resizer: Option[Resizer] = None,
+ val virtualNodesFactor: Int = 0,
+ val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[ConsistentHashingPool] {
def this(config: Config) =
@@ -354,10 +355,10 @@ final case class ConsistentHashingPool(
*/
@SerialVersionUID(1L)
final case class ConsistentHashingGroup(
- override val paths: immutable.Iterable[String],
- val virtualNodesFactor: Int = 0,
- val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ override val paths: immutable.Iterable[String],
+ val virtualNodesFactor: Int = 0,
+ val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
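
(Aside: the warning re-wrapped above lists the three ways a message can supply its hash key to `ConsistentHashingRoutingLogic`. A hedged sketch of each; the `Lookup` message and keys are hypothetical.)

```scala
import akka.routing.ConsistentHashingPool
import akka.routing.ConsistentHashingRouter.{ ConsistentHashable, ConsistentHashableEnvelope }

// Option 1: the message type itself declares its key.
final case class Lookup(key: String) extends ConsistentHashable {
  override def consistentHashKey: Any = key
}

// Option 2: a hashMapping supplied to the pool extracts the key.
val pool = ConsistentHashingPool(
  nrOfInstances = 10,
  hashMapping = { case Lookup(k) ⇒ k })

// Option 3: wrap any message in an envelope carrying an explicit key.
// router ! ConsistentHashableEnvelope(message = payload, hashKey = "user-42")
```
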
diff --git a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
index d14704250b..bf2da2c760 100644
--- a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
+++ b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
@@ -44,9 +44,9 @@ case object OptimalSizeExploringResizer {
*/
private[routing] case class ResizeRecord(
underutilizationStreak: Option[UnderUtilizationStreak] = None,
- messageCount: Long = 0,
- totalQueueLength: Int = 0,
- checkTime: Long = 0)
+ messageCount: Long = 0,
+ totalQueueLength: Int = 0,
+ checkTime: Long = 0)
/**
* INTERNAL API
@@ -115,16 +115,16 @@ case object OptimalSizeExploringResizer {
*/
@SerialVersionUID(1L)
case class DefaultOptimalSizeExploringResizer(
- lowerBound: PoolSize = 1,
- upperBound: PoolSize = 30,
- chanceOfScalingDownWhenFull: Double = 0.2,
- actionInterval: Duration = 5.seconds,
- numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
- exploreStepSize: Double = 0.1,
- downsizeRatio: Double = 0.8,
- downsizeAfterUnderutilizedFor: Duration = 72.hours,
- explorationProbability: Double = 0.4,
- weightOfLatestMetric: Double = 0.5) extends OptimalSizeExploringResizer {
+ lowerBound: PoolSize = 1,
+ upperBound: PoolSize = 30,
+ chanceOfScalingDownWhenFull: Double = 0.2,
+ actionInterval: Duration = 5.seconds,
+ numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
+ exploreStepSize: Double = 0.1,
+ downsizeRatio: Double = 0.8,
+ downsizeAfterUnderutilizedFor: Duration = 72.hours,
+ explorationProbability: Double = 0.4,
+ weightOfLatestMetric: Double = 0.5) extends OptimalSizeExploringResizer {
/**
* Leave package accessible for testing purpose
*/
diff --git a/akka-actor/src/main/scala/akka/routing/Random.scala b/akka-actor/src/main/scala/akka/routing/Random.scala
index 7984033f93..19f00e19d9 100644
--- a/akka-actor/src/main/scala/akka/routing/Random.scala
+++ b/akka-actor/src/main/scala/akka/routing/Random.scala
@@ -59,8 +59,8 @@ final class RandomRoutingLogic extends RoutingLogic {
final case class RandomPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[RandomPool] {
def this(config: Config) =
@@ -119,8 +119,8 @@ final case class RandomPool(
*/
@SerialVersionUID(1L)
final case class RandomGroup(
- override val paths: immutable.Iterable[String],
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ override val paths: immutable.Iterable[String],
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/Resizer.scala b/akka-actor/src/main/scala/akka/routing/Resizer.scala
index b1b57d742b..dc0eb93eb3 100644
--- a/akka-actor/src/main/scala/akka/routing/Resizer.scala
+++ b/akka-actor/src/main/scala/akka/routing/Resizer.scala
@@ -126,13 +126,13 @@ case object DefaultResizer {
*/
@SerialVersionUID(1L)
case class DefaultResizer(
- val lowerBound: Int = 1,
- val upperBound: Int = 10,
- val pressureThreshold: Int = 1,
- val rampupRate: Double = 0.2,
- val backoffThreshold: Double = 0.3,
- val backoffRate: Double = 0.1,
- val messagesPerResize: Int = 10) extends Resizer {
+ val lowerBound: Int = 1,
+ val upperBound: Int = 10,
+ val pressureThreshold: Int = 1,
+ val rampupRate: Double = 0.2,
+ val backoffThreshold: Double = 0.3,
+ val backoffRate: Double = 0.1,
+ val messagesPerResize: Int = 10) extends Resizer {
/**
* Java API constructor for default values except bounds.
@@ -246,13 +246,13 @@ case class DefaultResizer(
* INTERNAL API
*/
private[akka] final class ResizablePoolCell(
- _system: ActorSystemImpl,
- _ref: InternalActorRef,
- _routerProps: Props,
+ _system: ActorSystemImpl,
+ _ref: InternalActorRef,
+ _routerProps: Props,
_routerDispatcher: MessageDispatcher,
- _routeeProps: Props,
- _supervisor: InternalActorRef,
- val pool: Pool)
+ _routeeProps: Props,
+ _supervisor: InternalActorRef,
+ val pool: Pool)
extends RoutedActorCell(_system, _ref, _routerProps, _routerDispatcher, _routeeProps, _supervisor) {
require(pool.resizer.isDefined, "RouterConfig must be a Pool with defined resizer")
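
(Aside: `DefaultResizer` above is only re-indented; to situate it, a hedged sketch of attaching it to a pool, where `workerProps` is hypothetical.)

```scala
import akka.routing.{ DefaultResizer, RoundRobinPool }

// Routee count floats between 2 and 15, driven by mailbox pressure;
// rampupRate and backoffRate control how aggressively it grows and shrinks.
val resizer = DefaultResizer(lowerBound = 2, upperBound = 15)
val routerProps = RoundRobinPool(nrOfInstances = 2, resizer = Some(resizer))
  .props(workerProps) // hypothetical Props of the routees
```
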
diff --git a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
index 3ae0016815..0e983bc027 100644
--- a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
@@ -67,12 +67,13 @@ final class RoundRobinRoutingLogic extends RoutingLogic {
final case class RoundRobinPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[RoundRobinPool] {
def this(config: Config) =
- this(nrOfInstances = config.getInt("nr-of-instances"),
+ this(
+ nrOfInstances = config.getInt("nr-of-instances"),
resizer = Resizer.fromConfig(config),
usePoolDispatcher = config.hasPath("pool-dispatcher"))
@@ -127,8 +128,8 @@ final case class RoundRobinPool(
*/
@SerialVersionUID(1L)
final case class RoundRobinGroup(
- override val paths: immutable.Iterable[String],
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ override val paths: immutable.Iterable[String],
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
index 4c10cabd2b..7b37408fc9 100644
--- a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
@@ -35,12 +35,12 @@ private[akka] object RoutedActorCell {
* INTERNAL API
*/
private[akka] class RoutedActorCell(
- _system: ActorSystemImpl,
- _ref: InternalActorRef,
- _routerProps: Props,
+ _system: ActorSystemImpl,
+ _ref: InternalActorRef,
+ _routerProps: Props,
_routerDispatcher: MessageDispatcher,
- val routeeProps: Props,
- _supervisor: InternalActorRef)
+ val routeeProps: Props,
+ _supervisor: InternalActorRef)
extends ActorCell(_system, _ref, _routerProps, _routerDispatcher, _supervisor) {
private[akka] val routerConfig = _routerProps.routerConfig
@@ -154,8 +154,9 @@ private[akka] class RouterActor extends Actor {
}
val routingLogicController: Option[ActorRef] = cell.routerConfig.routingLogicController(
- cell.router.logic).map(props ⇒ context.actorOf(props.withDispatcher(context.props.dispatcher),
- name = "routingLogicController"))
+ cell.router.logic).map(props ⇒ context.actorOf(
+ props.withDispatcher(context.props.dispatcher),
+ name = "routingLogicController"))
def receive = {
case GetRoutees ⇒
diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
index 4cfade0d27..3e52b4c70e 100644
--- a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
@@ -22,13 +22,13 @@ import akka.dispatch.MessageDispatcher
* send a message to one (or more) of these actors.
*/
private[akka] class RoutedActorRef(
- _system: ActorSystemImpl,
- _routerProps: Props,
+ _system: ActorSystemImpl,
+ _routerProps: Props,
_routerDispatcher: MessageDispatcher,
- _routerMailbox: MailboxType,
- _routeeProps: Props,
- _supervisor: InternalActorRef,
- _path: ActorPath)
+ _routerMailbox: MailboxType,
+ _routeeProps: Props,
+ _supervisor: InternalActorRef,
+ _path: ActorPath)
extends RepointableActorRef(_system, _routerProps, _routerDispatcher, _routerMailbox, _supervisor, _path) {
// verify that a BalancingDispatcher is not used with a Router
diff --git a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
index 347493d283..27f4df9085 100644
--- a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
+++ b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
@@ -3,7 +3,6 @@
*/
package akka.routing
-
import scala.collection.immutable
import akka.ConfigurationException
import akka.actor.ActorContext
@@ -282,9 +281,9 @@ case object FromConfig extends FromConfig {
*/
def getInstance = this
@inline final def apply(
- resizer: Option[Resizer] = None,
+ resizer: Option[Resizer] = None,
supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
+ routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
new FromConfig(resizer, supervisorStrategy, routerDispatcher)
@inline final def unapply(fc: FromConfig): Option[String] = Some(fc.routerDispatcher)
@@ -297,9 +296,10 @@ case object FromConfig extends FromConfig {
* (defaults to default-dispatcher).
*/
@SerialVersionUID(1L)
-class FromConfig(override val resizer: Option[Resizer],
- override val supervisorStrategy: SupervisorStrategy,
- override val routerDispatcher: String) extends Pool {
+class FromConfig(
+ override val resizer: Option[Resizer],
+ override val supervisorStrategy: SupervisorStrategy,
+ override val routerDispatcher: String) extends Pool {
def this() = this(None, Pool.defaultSupervisorStrategy, Dispatchers.DefaultDispatcherId)
diff --git a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
index 0a79e3f477..75d2e3b57b 100644
--- a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
+++ b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
@@ -97,10 +97,10 @@ private[akka] final case class ScatterGatherFirstCompletedRoutees(
@SerialVersionUID(1L)
final case class ScatterGatherFirstCompletedPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
- within: FiniteDuration,
+ within: FiniteDuration,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[ScatterGatherFirstCompletedPool] {
def this(config: Config) =
@@ -165,9 +165,9 @@ final case class ScatterGatherFirstCompletedPool(
*/
@SerialVersionUID(1L)
final case class ScatterGatherFirstCompletedGroup(
- override val paths: immutable.Iterable[String],
- within: FiniteDuration,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ override val paths: immutable.Iterable[String],
+ within: FiniteDuration,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
index ae7c027010..3c559c8ac9 100644
--- a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
+++ b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
@@ -45,11 +45,12 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
// 4. An ActorRef with unknown mailbox size that isn't processing anything
// 5. An ActorRef with a known mailbox size
// 6. An ActorRef without any messages
- @tailrec private def selectNext(targets: immutable.IndexedSeq[Routee],
- proposedTarget: Routee = NoRoutee,
- currentScore: Long = Long.MaxValue,
- at: Int = 0,
- deep: Boolean = false): Routee = {
+ @tailrec private def selectNext(
+ targets: immutable.IndexedSeq[Routee],
+ proposedTarget: Routee = NoRoutee,
+ currentScore: Long = Long.MaxValue,
+ at: Int = 0,
+ deep: Boolean = false): Routee = {
if (targets.isEmpty)
NoRoutee
else if (at >= targets.size) {
@@ -174,8 +175,8 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
final case class SmallestMailboxPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[SmallestMailboxPool] {
def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/TailChopping.scala b/akka-actor/src/main/scala/akka/routing/TailChopping.scala
index c2d5d59acc..213cca5813 100644
--- a/akka-actor/src/main/scala/akka/routing/TailChopping.scala
+++ b/akka-actor/src/main/scala/akka/routing/TailChopping.scala
@@ -142,11 +142,11 @@ private[akka] final case class TailChoppingRoutees(
@SerialVersionUID(1L)
final case class TailChoppingPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
- within: FiniteDuration,
- interval: FiniteDuration,
+ within: FiniteDuration,
+ interval: FiniteDuration,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[TailChoppingPool] {
def this(config: Config) =
@@ -227,10 +227,10 @@ final case class TailChoppingPool(
* router management messages
*/
final case class TailChoppingGroup(
- override val paths: immutable.Iterable[String],
- within: FiniteDuration,
- interval: FiniteDuration,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group {
+ override val paths: immutable.Iterable[String],
+ within: FiniteDuration,
+ interval: FiniteDuration,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group {
def this(config: Config) =
this(
diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala
index b0f6f275de..4ece346ed1 100644
--- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala
+++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala
@@ -35,7 +35,7 @@ object Serialization {
private final def configToMap(path: String): Map[String, String] = {
import scala.collection.JavaConverters._
- config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k -> v.toString) }
+ config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k → v.toString) }
}
}
@@ -194,7 +194,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
* loading is performed by the system’s [[akka.actor.DynamicAccess]].
*/
def serializerOf(serializerFQN: String): Try[Serializer] =
- system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, List(classOf[ExtendedActorSystem] -> system)) recoverWith {
+ system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, List(classOf[ExtendedActorSystem] → system)) recoverWith {
case _: NoSuchMethodException ⇒ system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, Nil)
}
@@ -203,7 +203,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
* By default always contains the following mapping: "java" -> akka.serialization.JavaSerializer
*/
private val serializers: Map[String, Serializer] =
- for ((k: String, v: String) ← settings.Serializers) yield k -> serializerOf(v).get
+ for ((k: String, v: String) ← settings.Serializers) yield k → serializerOf(v).get
/**
* bindings is a Seq of tuple representing the mapping from Class to Serializer.
@@ -244,7 +244,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
* Maps from a Serializer Identity (Int) to a Serializer instance (optimization)
*/
val serializerByIdentity: Map[Int, Serializer] =
- Map(NullSerializer.identifier -> NullSerializer) ++ serializers map { case (_, v) ⇒ (v.identifier, v) }
+ Map(NullSerializer.identifier → NullSerializer) ++ serializers map { case (_, v) ⇒ (v.identifier, v) }
private val isJavaSerializationWarningEnabled = settings.config.getBoolean("akka.actor.warn-about-java-serializer-usage")
diff --git a/akka-actor/src/main/scala/akka/util/BoxedType.scala b/akka-actor/src/main/scala/akka/util/BoxedType.scala
index 51286f7ae9..87db921b4d 100644
--- a/akka-actor/src/main/scala/akka/util/BoxedType.scala
+++ b/akka-actor/src/main/scala/akka/util/BoxedType.scala
@@ -7,15 +7,15 @@ object BoxedType {
import java.{ lang ⇒ jl }
private val toBoxed = Map[Class[_], Class[_]](
- classOf[Boolean] -> classOf[jl.Boolean],
- classOf[Byte] -> classOf[jl.Byte],
- classOf[Char] -> classOf[jl.Character],
- classOf[Short] -> classOf[jl.Short],
- classOf[Int] -> classOf[jl.Integer],
- classOf[Long] -> classOf[jl.Long],
- classOf[Float] -> classOf[jl.Float],
- classOf[Double] -> classOf[jl.Double],
- classOf[Unit] -> classOf[scala.runtime.BoxedUnit])
+ classOf[Boolean] → classOf[jl.Boolean],
+ classOf[Byte] → classOf[jl.Byte],
+ classOf[Char] → classOf[jl.Character],
+ classOf[Short] → classOf[jl.Short],
+ classOf[Int] → classOf[jl.Integer],
+ classOf[Long] → classOf[jl.Long],
+ classOf[Float] → classOf[jl.Float],
+ classOf[Double] → classOf[jl.Double],
+ classOf[Unit] → classOf[scala.runtime.BoxedUnit])
final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c
}
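
(Aside: the `toBoxed` table above maps each primitive class to its boxed counterpart, and `apply` passes reference types through untouched. A sketch of the observable behaviour.)

```scala
import akka.util.BoxedType

assert(BoxedType(classOf[Int]) == classOf[java.lang.Integer])        // primitive → boxed
assert(BoxedType(classOf[Unit]) == classOf[scala.runtime.BoxedUnit]) // Unit is boxed too
assert(BoxedType(classOf[String]) == classOf[String])                // references pass through
```
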
diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala
index 4587724a98..8ccfa2518b 100644
--- a/akka-actor/src/main/scala/akka/util/ByteString.scala
+++ b/akka-actor/src/main/scala/akka/util/ByteString.scala
@@ -357,7 +357,7 @@ object ByteString {
private[akka] object Companion {
private val companionMap = Seq(ByteString1, ByteString1C, ByteStrings).
- map(x ⇒ x.SerializationIdentity -> x).toMap.
+ map(x ⇒ x.SerializationIdentity → x).toMap.
withDefault(x ⇒ throw new IllegalArgumentException("Invalid serialization id " + x))
def apply(from: Byte): Companion = companionMap(from)
diff --git a/akka-actor/src/main/scala/akka/util/LineNumbers.scala b/akka-actor/src/main/scala/akka/util/LineNumbers.scala
index 3aa1ea0af1..25f355fed8 100644
--- a/akka-actor/src/main/scala/akka/util/LineNumbers.scala
+++ b/akka-actor/src/main/scala/akka/util/LineNumbers.scala
@@ -187,7 +187,7 @@ object LineNumbers {
val cl = c.getClassLoader
val r = cl.getResourceAsStream(resource)
if (debug) println(s"LNB: resource '$resource' resolved to stream $r")
- Option(r).map(_ -> None)
+ Option(r).map(_ → None)
}
private def getStreamForLambda(l: AnyRef): Option[(InputStream, Some[String])] =
@@ -269,7 +269,7 @@ object LineNumbers {
val count = d.readUnsignedShort()
if (debug) println(s"LNB: reading $count methods")
if (c.contains("Code") && c.contains("LineNumberTable")) {
- (1 to count).map(_ ⇒ readMethod(d, c("Code"), c("LineNumberTable"), filter)).flatten.foldLeft(Int.MaxValue -> 0) {
+ (1 to count).map(_ ⇒ readMethod(d, c("Code"), c("LineNumberTable"), filter)).flatten.foldLeft(Int.MaxValue → 0) {
case ((low, high), (start, end)) ⇒ (Math.min(low, start), Math.max(high, end))
} match {
case (Int.MaxValue, 0) ⇒ None
@@ -282,10 +282,11 @@ object LineNumbers {
}
}
- private def readMethod(d: DataInputStream,
- codeTag: Int,
- lineNumberTableTag: Int,
- filter: Option[String])(implicit c: Constants): Option[(Int, Int)] = {
+ private def readMethod(
+ d: DataInputStream,
+ codeTag: Int,
+ lineNumberTableTag: Int,
+ filter: Option[String])(implicit c: Constants): Option[(Int, Int)] = {
skip(d, 2) // access flags
val name = d.readUnsignedShort() // name
skip(d, 2) // signature
@@ -315,7 +316,7 @@ object LineNumbers {
skip(d, 2) // start PC
d.readUnsignedShort() // finally: the line number
}
- Some(lines.min -> lines.max)
+ Some(lines.min → lines.max)
}
}
if (debug) println(s"LNB: nested attributes yielded: $possibleLines")
diff --git a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala
index 0ba6d45bf7..dc86cf2ec0 100644
--- a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala
+++ b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala
@@ -127,7 +127,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[
if (!found) {
val v = values + value
val n = new Nonroot(root, key, v)
- integrate(n) ++ n.innerAddValue(key, value) :+ (key -> v)
+ integrate(n) ++ n.innerAddValue(key, value) :+ (key → v)
} else ch
}
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala
index abd481d463..28e486320d 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala
@@ -34,7 +34,7 @@ class ActorCreationBenchmark {
}
@TearDown(Level.Trial)
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
index 3a352282c4..794c506b3b 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
@@ -28,7 +28,7 @@ class ForkJoinActorBenchmark {
implicit var system: ActorSystem = _
@Setup(Level.Trial)
- def setup():Unit = {
+ def setup(): Unit = {
system = ActorSystem("ForkJoinActorBenchmark", ConfigFactory.parseString(
s"""| akka {
| log-dead-letters = off
@@ -44,11 +44,12 @@ class ForkJoinActorBenchmark {
| }
| }
| }
- """.stripMargin))
+ """.stripMargin
+ ))
}
@TearDown(Level.Trial)
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
@@ -56,7 +57,7 @@ class ForkJoinActorBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
@OperationsPerInvocation(messages)
- def pingPong():Unit = {
+ def pingPong(): Unit = {
val ping = system.actorOf(Props[ForkJoinActorBenchmark.PingPong])
val pong = system.actorOf(Props[ForkJoinActorBenchmark.PingPong])
@@ -72,7 +73,7 @@ class ForkJoinActorBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
@OperationsPerInvocation(messages)
- def floodPipe():Unit = {
+ def floodPipe(): Unit = {
val end = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], None))
val middle = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], Some(end)))
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala
index 1e7d86110c..d63d68d767 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala
@@ -26,7 +26,7 @@ class RouterPoolCreationBenchmark {
var size = 0
@TearDown(Level.Trial)
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala
index 4097f3a678..82c0a38130 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala
@@ -56,13 +56,13 @@ class ScheduleBenchmark {
var promise: Promise[Any] = _
@Setup(Level.Iteration)
- def setup():Unit = {
+ def setup(): Unit = {
winner = (to * ratio + 1).toInt
promise = Promise[Any]()
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
@@ -70,7 +70,7 @@ class ScheduleBenchmark {
def op(idx: Int) = if (idx == winner) promise.trySuccess(idx) else idx
@Benchmark
- def oneSchedule():Unit = {
+ def oneSchedule(): Unit = {
val aIdx = new AtomicInteger(1)
val tryWithNext = scheduler.schedule(0.millis, interval) {
val idx = aIdx.getAndIncrement
@@ -84,7 +84,7 @@ class ScheduleBenchmark {
}
@Benchmark
- def multipleScheduleOnce():Unit = {
+ def multipleScheduleOnce(): Unit = {
val tryWithNext = (1 to to).foldLeft(0.millis -> List[Cancellable]()) {
case ((interv, c), idx) ⇒
(interv + interval, scheduler.scheduleOnce(interv) {
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala
index bda620c9c0..19b30dd2b6 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala
@@ -35,7 +35,7 @@ class StashCreationBenchmark {
val probe = TestProbe()
@TearDown(Level.Trial)
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
index 6fe247c86f..fe4e410d05 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
@@ -25,7 +25,7 @@ class TellOnlyBenchmark {
implicit var system: ActorSystem = _
@Setup(Level.Trial)
- def setup():Unit = {
+ def setup(): Unit = {
system = ActorSystem("TellOnlyBenchmark", ConfigFactory.parseString(
s"""| akka {
| log-dead-letters = off
@@ -46,11 +46,12 @@ class TellOnlyBenchmark {
| type = "akka.actor.TellOnlyBenchmark$$DroppingDispatcherConfigurator"
| mailbox-type = "akka.actor.TellOnlyBenchmark$$UnboundedDroppingMailbox"
| }
- | """.stripMargin))
+ | """.stripMargin
+ ))
}
@TearDown(Level.Trial)
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
@@ -59,7 +60,7 @@ class TellOnlyBenchmark {
var probe: TestProbe = _
@Setup(Level.Iteration)
- def setupIteration():Unit = {
+ def setupIteration(): Unit = {
actor = system.actorOf(Props[TellOnlyBenchmark.Echo].withDispatcher("dropping-dispatcher"))
probe = TestProbe()
probe.watch(actor)
@@ -71,7 +72,7 @@ class TellOnlyBenchmark {
}
@TearDown(Level.Iteration)
- def shutdownIteration():Unit = {
+ def shutdownIteration(): Unit = {
probe.send(actor, flipDrop)
probe.expectNoMsg(200.millis)
actor ! stop
@@ -82,7 +83,7 @@ class TellOnlyBenchmark {
@Benchmark
@OutputTimeUnit(TimeUnit.MICROSECONDS)
- def tell():Unit = {
+ def tell(): Unit = {
probe.send(actor, message)
}
}
@@ -105,7 +106,7 @@ object TellOnlyBenchmark {
class DroppingMessageQueue extends UnboundedMailbox.MessageQueue {
@volatile var dropping = false
- override def enqueue(receiver: ActorRef, handle: Envelope):Unit = {
+ override def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
if (handle.message == flipDrop) dropping = !dropping
else if (!dropping) super.enqueue(receiver, handle)
}
@@ -125,21 +126,22 @@ object TellOnlyBenchmark {
_throughput: Int,
_throughputDeadlineTime: Duration,
_executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
- _shutdownTimeout: FiniteDuration)
- extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
+ _shutdownTimeout: FiniteDuration
+ )
+ extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
override protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = {
val mbox = receiver.mailbox
mbox.enqueue(receiver.self, invocation)
mbox.messageQueue match {
case mb: DroppingMessageQueue if mb.dropping ⇒ // do nothing
- case _ ⇒ registerForExecution(mbox, true, false)
+ case _ ⇒ registerForExecution(mbox, true, false)
}
}
}
class DroppingDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites)
- extends MessageDispatcherConfigurator(config, prerequisites) {
+ extends MessageDispatcherConfigurator(config, prerequisites) {
override def dispatcher(): MessageDispatcher = new DroppingDispatcher(
this,
@@ -147,6 +149,7 @@ object TellOnlyBenchmark {
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),
configureExecutor(),
- config.getMillisDuration("shutdown-timeout"))
+ config.getMillisDuration("shutdown-timeout")
+ )
}
}
diff --git a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala
index a62ea6da6c..553bbee0f0 100644
--- a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala
@@ -48,7 +48,7 @@ class ORSetMergeBenchmark {
var elem2: String = _
@Setup(Level.Trial)
- def setup():Unit = {
+ def setup(): Unit = {
set1 = (1 to set1Size).foldLeft(ORSet.empty[String])((s, n) => s.add(nextNode(), "elem" + n))
addFromSameNode = set1.add(nodeA, "elem" + set1Size + 1).merge(set1)
addFromOtherNode = set1.add(nodeB, "elem" + set1Size + 1).merge(set1)
diff --git a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala
index 6b04e499ce..612387cb2d 100644
--- a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala
@@ -45,7 +45,7 @@ class VersionVectorBenchmark {
var dot1: VersionVector = _
@Setup(Level.Trial)
- def setup():Unit = {
+ def setup(): Unit = {
vv1 = (1 to size).foldLeft(VersionVector.empty)((vv, n) => vv + nextNode())
vv2 = vv1 + nextNode()
vv3 = vv1 + nextNode()
diff --git a/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala
index 6e238efd0a..56ccdedc05 100644
--- a/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala
@@ -21,7 +21,7 @@ class CachingConfigBenchmark {
val deepConfig = ConfigFactory.parseString(deepConfigString)
val deepCaching = new CachingConfig(deepConfig)
- @Benchmark def deep_config = deepConfig.hasPath(deepKey)
+ @Benchmark def deep_config = deepConfig.hasPath(deepKey)
@Benchmark def deep_caching = deepCaching.hasPath(deepKey)
}
diff --git a/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala
index 0ca7640522..1ebf82d1f6 100644
--- a/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala
@@ -42,7 +42,7 @@ mailbox {
val ref = sys.actorOf(Props(new Actor {
def receive = {
case Stop => sender() ! Stop
- case _ =>
+ case _ =>
}
}).withDispatcher("dispatcher").withMailbox("mailbox"), "receiver")
diff --git a/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala
index c0718b9b1c..706c9c49ed 100644
--- a/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala
@@ -28,7 +28,8 @@ class HttpBenchmark {
"""
akka {
loglevel = "ERROR"
- }""".stripMargin).withFallback(ConfigFactory.load())
+ }""".stripMargin
+ ).withFallback(ConfigFactory.load())
implicit val system = ActorSystem("HttpBenchmark", config)
implicit val materializer = ActorMaterializer()
@@ -38,7 +39,7 @@ class HttpBenchmark {
var pool: Flow[(HttpRequest, Int), (Try[HttpResponse], Int), _] = _
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
val route = {
path("test") {
get {
@@ -53,21 +54,21 @@ class HttpBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
Await.ready(Http().shutdownAllConnectionPools(), 1.second)
binding.unbind()
Await.result(system.terminate(), 5.seconds)
}
@Benchmark
- def single_request():Unit = {
+ def single_request(): Unit = {
import system.dispatcher
val response = Await.result(Http().singleRequest(request), 1.second)
Await.result(Unmarshal(response.entity).to[String], 1.second)
}
@Benchmark
- def single_request_pool():Unit = {
+ def single_request_pool(): Unit = {
import system.dispatcher
val (response, id) = Await.result(Source.single(HttpRequest(uri = "/test") -> 42).via(pool).runWith(Sink.head), 1.second)
Await.result(Unmarshal(response.get.entity).to[String], 1.second)
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
index 239441bbb5..a025a809d8 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
@@ -45,7 +45,7 @@ class LevelDbBatchingBenchmark {
val batch_200 = List.fill(200) { AtomicWrite(PersistentRepr("data", 12, "pa")) }
@Setup(Level.Trial)
- def setup():Unit = {
+ def setup(): Unit = {
sys = ActorSystem("sys")
deleteStorage(sys)
SharedLeveldbJournal.setStore(store, sys)
@@ -55,7 +55,7 @@ class LevelDbBatchingBenchmark {
}
@TearDown(Level.Trial)
- def tearDown():Unit = {
+ def tearDown(): Unit = {
store ! PoisonPill
Thread.sleep(500)
@@ -66,7 +66,7 @@ class LevelDbBatchingBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MICROSECONDS)
@OperationsPerInvocation(1)
- def write_1():Unit = {
+ def write_1(): Unit = {
probe.send(store, WriteMessages(batch_1))
probe.expectMsgType[Any]
}
@@ -74,7 +74,7 @@ class LevelDbBatchingBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MICROSECONDS)
@OperationsPerInvocation(10)
- def writeBatch_10():Unit = {
+ def writeBatch_10(): Unit = {
probe.send(store, WriteMessages(batch_10))
probe.expectMsgType[Any]
}
@@ -82,7 +82,7 @@ class LevelDbBatchingBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MICROSECONDS)
@OperationsPerInvocation(100)
- def writeBatch_100():Unit = {
+ def writeBatch_100(): Unit = {
probe.send(store, WriteMessages(batch_100))
probe.expectMsgType[Any]
}
@@ -90,7 +90,7 @@ class LevelDbBatchingBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MICROSECONDS)
@OperationsPerInvocation(200)
- def writeBatch_200():Unit = {
+ def writeBatch_200(): Unit = {
probe.send(store, WriteMessages(batch_200))
probe.expectMsgType[Any]
}
@@ -101,7 +101,8 @@ class LevelDbBatchingBenchmark {
val storageLocations = List(
"akka.persistence.journal.leveldb.dir",
"akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s ⇒ new File(sys.settings.config.getString(s)))
+ "akka.persistence.snapshot-store.local.dir"
+ ).map(s ⇒ new File(sys.settings.config.getString(s)))
storageLocations.foreach(FileUtils.deleteDirectory)
}
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
index c7612ce10d..4ed8264520 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
@@ -32,7 +32,8 @@ class PersistentActorDeferBenchmark {
lazy val storageLocations = List(
"akka.persistence.journal.leveldb.dir",
"akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s ⇒ new File(system.settings.config.getString(s)))
+ "akka.persistence.snapshot-store.local.dir"
+ ).map(s ⇒ new File(system.settings.config.getString(s)))
var system: ActorSystem = _
@@ -43,7 +44,7 @@ class PersistentActorDeferBenchmark {
val data10k = (1 to 10000).toArray
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
system = ActorSystem("test", config)
probe = TestProbe()(system)
@@ -54,7 +55,7 @@ class PersistentActorDeferBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
@@ -63,7 +64,7 @@ class PersistentActorDeferBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def tell_persistAsync_defer_persistAsync_reply():Unit = {
+ def tell_persistAsync_defer_persistAsync_reply(): Unit = {
for (i <- data10k) persistAsync_defer.tell(i, probe.ref)
probe.expectMsg(data10k.last)
@@ -71,7 +72,7 @@ class PersistentActorDeferBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def tell_persistAsync_defer_persistAsync_replyASAP():Unit = {
+ def tell_persistAsync_defer_persistAsync_replyASAP(): Unit = {
for (i <- data10k) persistAsync_defer_replyASAP.tell(i, probe.ref)
probe.expectMsg(data10k.last)
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
index de8912d54e..f0e2b31a60 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
@@ -21,7 +21,8 @@ class PersistentActorThroughputBenchmark {
lazy val storageLocations = List(
"akka.persistence.journal.leveldb.dir",
"akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s ⇒ new File(system.settings.config.getString(s)))
+ "akka.persistence.snapshot-store.local.dir"
+ ).map(s ⇒ new File(system.settings.config.getString(s)))
var system: ActorSystem = _
@@ -35,7 +36,7 @@ class PersistentActorThroughputBenchmark {
val data10k = (1 to 10000).toArray
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
system = ActorSystem("test", config)
probe = TestProbe()(system)
@@ -52,7 +53,7 @@ class PersistentActorThroughputBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
@@ -61,7 +62,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def actor_normalActor_reply_baseline():Unit = {
+ def actor_normalActor_reply_baseline(): Unit = {
for (i <- data10k) actor.tell(i, probe.ref)
probe.expectMsg(data10k.last)
@@ -69,7 +70,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def persistentActor_persist_reply():Unit = {
+ def persistentActor_persist_reply(): Unit = {
for (i <- data10k) persistPersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
@@ -77,7 +78,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def persistentActor_persistAsync_reply():Unit = {
+ def persistentActor_persistAsync_reply(): Unit = {
for (i <- data10k) persistAsync1PersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
@@ -85,7 +86,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def persistentActor_noPersist_reply():Unit = {
+ def persistentActor_noPersist_reply(): Unit = {
for (i <- data10k) noPersistPersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
@@ -93,7 +94,7 @@ class PersistentActorThroughputBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def persistentActor_persistAsync_replyRightOnCommandReceive():Unit = {
+ def persistentActor_persistAsync_replyRightOnCommandReceive(): Unit = {
for (i <- data10k) persistAsyncQuickReplyPersistentActor.tell(i, probe.ref)
probe.expectMsg(Evt(data10k.last))
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
index 59a300c8ff..80fee8833e 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
@@ -22,7 +22,8 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
lazy val storageLocations = List(
"akka.persistence.journal.leveldb.dir",
"akka.persistence.journal.leveldb-shared.store.dir",
- "akka.persistence.snapshot-store.local.dir").map(s ⇒ new File(system.settings.config.getString(s)))
+ "akka.persistence.snapshot-store.local.dir"
+ ).map(s ⇒ new File(system.settings.config.getString(s)))
var system: ActorSystem = _
@@ -36,7 +37,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
val dataCount = 10000
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
system = ActorSystem("PersistentActorWithAtLeastOnceDeliveryBenchmark", config)
probe = TestProbe()(system)
@@ -51,7 +52,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
@@ -60,7 +61,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def persistentActor_persistAsync_with_AtLeastOnceDelivery():Unit = {
+ def persistentActor_persistAsync_with_AtLeastOnceDelivery(): Unit = {
for (i <- 1 to dataCount)
persistAsyncPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
probe.expectMsg(20.seconds, Evt(dataCount))
@@ -68,7 +69,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def persistentActor_persist_with_AtLeastOnceDelivery():Unit = {
+ def persistentActor_persist_with_AtLeastOnceDelivery(): Unit = {
for (i <- 1 to dataCount)
persistPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
probe.expectMsg(2.minutes, Evt(dataCount))
@@ -76,7 +77,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
@Benchmark
@OperationsPerInvocation(10000)
- def persistentActor_noPersist_with_AtLeastOnceDelivery():Unit = {
+ def persistentActor_noPersist_with_AtLeastOnceDelivery(): Unit = {
for (i <- 1 to dataCount)
noPersistPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
probe.expectMsg(20.seconds, Evt(dataCount))
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala
index 8e60d645e9..0e39fa6032 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala
@@ -4,7 +4,7 @@
package akka.stream
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
import akka.actor.ActorSystem
import akka.stream.scaladsl._
import java.util.concurrent.TimeUnit
@@ -30,7 +30,7 @@ class FlatMapMergeBenchmark {
def createSource(count: Int): Graph[SourceShape[Int], NotUsed] = akka.stream.Fusing.aggressive(Source.repeat(1).take(count))
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
val source = NumberOfStreams match {
// Base line: process NumberOfElements-many elements from a single source without using flatMapMerge
case 0 => createSource(NumberOfElements)
@@ -43,13 +43,13 @@ class FlatMapMergeBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
Await.result(system.terminate(), 5.seconds)
}
@Benchmark
@OperationsPerInvocation(100000) // Note: needs to match NumberOfElements.
- def flat_map_merge_100k_elements():Unit = {
+ def flat_map_merge_100k_elements(): Unit = {
Await.result(graph.run(), Duration.Inf)
}
}
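// Aside — an illustrative sketch (not part of the patch) of the operator this benchmark
// exercises; the actor-system name, stream sizes, and timeouts are assumptions:
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }

object FlatMapMergeSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val mat = ActorMaterializer()
  // Merge four sub-streams of 25 elements each into a single stream of 100 elements.
  val done = Source(1 to 4)
    .flatMapMerge(breadth = 4, i ⇒ Source.repeat(i).take(25))
    .runWith(Sink.ignore)
  Await.result(done, 5.seconds)
  Await.result(system.terminate(), 5.seconds)
}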
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala
index 5e5cf0df3f..44f97b3375 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala
@@ -47,7 +47,8 @@ class FlowMapBenchmark {
type = akka.testkit.CallingThreadDispatcherConfigurator
}
}
- }""".stripMargin).withFallback(ConfigFactory.load())
+ }""".stripMargin
+ ).withFallback(ConfigFactory.load())
implicit val system = ActorSystem("test", config)
@@ -69,7 +70,7 @@ class FlowMapBenchmark {
var numberOfMapOps = 0
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
val settings = ActorMaterializerSettings(system)
.withInputBuffer(initialInputBufferSize, initialInputBufferSize)
@@ -111,13 +112,13 @@ class FlowMapBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
Await.result(system.terminate(), 5.seconds)
}
@Benchmark
@OperationsPerInvocation(100000)
- def flow_map_100k_elements():Unit = {
+ def flow_map_100k_elements(): Unit = {
val lock = new Lock() // TODO rethink the most lightweight way to await a stream's completion
lock.acquire()
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/GraphBuilderBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/GraphBuilderBenchmark.scala
index 477d9ec149..2a791feabb 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/GraphBuilderBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/GraphBuilderBenchmark.scala
@@ -16,22 +16,22 @@ class GraphBuilderBenchmark {
var complexity = 0
@Benchmark
- def flow_with_map():Unit = {
+ def flow_with_map(): Unit = {
MaterializationBenchmark.flowWithMapBuilder(complexity)
}
@Benchmark
- def graph_with_junctions():Unit ={
+ def graph_with_junctions(): Unit = {
MaterializationBenchmark.graphWithJunctionsBuilder(complexity)
}
@Benchmark
- def graph_with_nested_imports():Unit = {
+ def graph_with_nested_imports(): Unit = {
MaterializationBenchmark.graphWithNestedImportsBuilder(complexity)
}
@Benchmark
- def graph_with_imported_flow():Unit = {
+ def graph_with_imported_flow(): Unit = {
MaterializationBenchmark.graphWithImportedFlowBuilder(complexity)
}
}
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
index afb1ecf472..d48653187c 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
@@ -1,13 +1,12 @@
package akka.stream
import akka.event._
-import akka.stream.impl.fusing.{ GraphInterpreterSpecKit, GraphStages}
+import akka.stream.impl.fusing.{ GraphInterpreterSpecKit, GraphStages }
import akka.stream.impl.fusing.GraphStages
import akka.stream.impl.fusing.GraphInterpreter.{ DownstreamBoundaryStageLogic, UpstreamBoundaryStageLogic }
import akka.stream.stage._
import org.openjdk.jmh.annotations._
-
import java.util.concurrent.TimeUnit
@State(Scope.Benchmark)
@@ -24,7 +23,7 @@ class InterpreterBenchmark {
@Benchmark
@OperationsPerInvocation(100000)
- def graph_interpreter_100k_elements():Unit = {
+ def graph_interpreter_100k_elements(): Unit = {
new GraphInterpreterSpecKit {
new TestSetup {
val identities = Vector.fill(numberOfIds)(GraphStages.identity[Int])
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala
index 76e5e562a6..ac5b6ab267 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala
@@ -43,33 +43,30 @@ object MaterializationBenchmark {
val graphWithNestedImportsBuilder = (numOfNestedGraphs: Int) => {
var flow: Graph[FlowShape[Unit, Unit], NotUsed] = Flow[Unit].map(identity)
for (_ <- 1 to numOfNestedGraphs) {
- flow = GraphDSL.create(flow) { b ⇒
- flow ⇒
- FlowShape(flow.in, flow.out)
+ flow = GraphDSL.create(flow) { b ⇒ flow ⇒
+ FlowShape(flow.in, flow.out)
}
}
- RunnableGraph.fromGraph(GraphDSL.create(flow) { implicit b ⇒
- flow ⇒
- import GraphDSL.Implicits._
- Source.single(()) ~> flow ~> Sink.ignore
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(flow) { implicit b ⇒ flow ⇒
+ import GraphDSL.Implicits._
+ Source.single(()) ~> flow ~> Sink.ignore
+ ClosedShape
})
}
val graphWithImportedFlowBuilder = (numOfFlows: Int) =>
- RunnableGraph.fromGraph(GraphDSL.create(Source.single(())) { implicit b ⇒
- source ⇒
- import GraphDSL.Implicits._
- val flow = Flow[Unit].map(identity)
- var out: Outlet[Unit] = source.out
- for (i <- 0 until numOfFlows) {
- val flowShape = b.add(flow)
- out ~> flowShape
- out = flowShape.outlet
- }
- out ~> Sink.ignore
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(Source.single(())) { implicit b ⇒ source ⇒
+ import GraphDSL.Implicits._
+ val flow = Flow[Unit].map(identity)
+ var out: Outlet[Unit] = source.out
+ for (i <- 0 until numOfFlows) {
+ val flowShape = b.add(flow)
+ out ~> flowShape
+ out = flowShape.outlet
+ }
+ out ~> Sink.ignore
+ ClosedShape
})
}
@@ -91,7 +88,7 @@ class MaterializationBenchmark {
var complexity = 0
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
flowWithMap = flowWithMapBuilder(complexity)
graphWithJunctions = graphWithJunctionsBuilder(complexity)
graphWithNestedImports = graphWithNestedImportsBuilder(complexity)
@@ -99,22 +96,19 @@ class MaterializationBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
Await.result(system.terminate(), 5.seconds)
}
@Benchmark
- def flow_with_map():Unit = flowWithMap.run()
-
+ def flow_with_map(): Unit = flowWithMap.run()
@Benchmark
- def graph_with_junctions():Unit = graphWithJunctions.run()
-
+ def graph_with_junctions(): Unit = graphWithJunctions.run()
@Benchmark
- def graph_with_nested_imports():Unit = graphWithNestedImports.run()
-
+ def graph_with_nested_imports(): Unit = graphWithNestedImports.run()
@Benchmark
- def graph_with_imported_flow():Unit = graphWithImportedFlow.run()
+ def graph_with_imported_flow(): Unit = graphWithImportedFlow.run()
}
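// Aside — an illustrative sketch (not part of the patch): the reflow above only moves the
// curried builder parameters of GraphDSL.create onto one line; `{ implicit b ⇒ d ⇒ ... }`
// is a function that first receives the builder, then the imported graph's shape. Names
// here are assumptions:
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ClosedShape }
import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source }

object GraphDslSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val mat = ActorMaterializer()
  val doubler = Flow[Int].map(_ * 2)
  // The builder `b` is received first, then `d`, the shape of the imported `doubler`.
  val graph = RunnableGraph.fromGraph(GraphDSL.create(doubler) { implicit b ⇒ d ⇒
    import GraphDSL.Implicits._
    Source(1 to 10) ~> d ~> Sink.ignore
    ClosedShape
  })
  graph.run()
  Await.result(system.terminate(), 5.seconds)
}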
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala
index cba982d040..7075b15050 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala
@@ -49,7 +49,7 @@ class FileSourcesBenchmark {
var ioSourceLinesIterator: Source[ByteString, NotUsed] = _
@Setup
- def setup():Unit = {
+ def setup(): Unit = {
fileChannelSource = FileIO.fromPath(file, bufSize)
fileInputStreamSource = StreamConverters.fromInputStream(() ⇒ Files.newInputStream(file), bufSize)
ioSourceLinesIterator = Source.fromIterator(() ⇒ scala.io.Source.fromFile(file.toFile).getLines()).map(ByteString(_))
@@ -61,26 +61,26 @@ class FileSourcesBenchmark {
}
@TearDown
- def shutdown():Unit = {
+ def shutdown(): Unit = {
Await.result(system.terminate(), Duration.Inf)
}
@Benchmark
- def fileChannel():Unit = {
+ def fileChannel(): Unit = {
val h = fileChannelSource.to(Sink.ignore).run()
Await.result(h, 30.seconds)
}
@Benchmark
- def fileChannel_noReadAhead():Unit = {
+ def fileChannel_noReadAhead(): Unit = {
val h = fileChannelSource.withAttributes(Attributes.inputBuffer(1, 1)).to(Sink.ignore).run()
Await.result(h, 30.seconds)
}
@Benchmark
- def inputStream():Unit = {
+ def inputStream(): Unit = {
val h = fileInputStreamSource.to(Sink.ignore).run()
Await.result(h, 30.seconds)
@@ -92,7 +92,7 @@ class FileSourcesBenchmark {
* FileSourcesBenchmark.naive_ioSourceLinesIterator avgt 20 7067.944 ± 1341.847 ms/op
*/
@Benchmark
- def naive_ioSourceLinesIterator():Unit = {
+ def naive_ioSourceLinesIterator(): Unit = {
val p = Promise[Done]()
ioSourceLinesIterator.to(Sink.onComplete(p.complete(_))).run()
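// Aside — an illustrative sketch (not part of the patch) of the channel-backed source
// benchmarked above; the file path is hypothetical:
import java.nio.file.Paths
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Sink }

object FileSourceSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val mat = ActorMaterializer()
  // Read the file in chunks (8 KiB by default) and discard the bytes.
  val io = FileIO.fromPath(Paths.get("/tmp/sample.txt")).to(Sink.ignore).run()
  Await.result(io, 30.seconds)
  Await.result(system.terminate(), 5.seconds)
}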
diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala
index c898788490..09556b3306 100644
--- a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala
+++ b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala
@@ -163,7 +163,7 @@ private[camel] class ProducerRegistrar(activationTracker: ActorRef) extends Acto
try {
val endpoint = camelContext.getEndpoint(endpointUri)
val processor = new SendProcessor(endpoint)
- camelObjects = camelObjects.updated(producer, endpoint -> processor)
+ camelObjects = camelObjects.updated(producer, endpoint → processor)
// if this throws, the supervisor stops the producer and de-registers it on termination
processor.start()
producer ! CamelProducerObjects(endpoint, processor)
diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala
index ccab13e0a2..8413f60de0 100644
--- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala
+++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala
@@ -49,10 +49,11 @@ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends D
* [actorPath]?[options]%s,
* where [actorPath] refers to the actor path to the actor.
*/
-private[camel] class ActorEndpoint(uri: String,
- comp: ActorComponent,
- val path: ActorEndpointPath,
- val camel: Camel) extends DefaultEndpoint(uri, comp) with ActorEndpointConfig {
+private[camel] class ActorEndpoint(
+ uri: String,
+ comp: ActorComponent,
+ val path: ActorEndpointPath,
+ val camel: Camel) extends DefaultEndpoint(uri, comp) with ActorEndpointConfig {
/**
* The ActorEndpoint only supports receiving messages from Camel.
@@ -174,7 +175,7 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex
path.findActorIn(camel.system) getOrElse (throw new ActorNotRegisteredException(path.actorPath))
private[this] def messageFor(exchange: CamelExchangeAdapter) =
- exchange.toRequestMessage(Map(CamelMessage.MessageExchangeId -> exchange.getExchangeId))
+ exchange.toRequestMessage(Map(CamelMessage.MessageExchangeId → exchange.getExchangeId))
}
/**
diff --git a/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala b/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala
index 1fd8d06899..2043d7aed6 100644
--- a/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala
@@ -50,17 +50,17 @@ class CamelExchangeAdapterTest extends FunSuite with SharedCamelSystem {
test("mustCreateRequestMessageFromInMessage") {
val m = sampleInOnly.toRequestMessage
- assert(m === CamelMessage("test-in", Map("key-in" -> "val-in")))
+ assert(m === CamelMessage("test-in", Map("key-in" → "val-in")))
}
test("mustCreateResponseMessageFromInMessage") {
val m = sampleInOnly.toResponseMessage
- assert(m === CamelMessage("test-in", Map("key-in" -> "val-in")))
+ assert(m === CamelMessage("test-in", Map("key-in" → "val-in")))
}
test("mustCreateResponseMessageFromOutMessage") {
val m = sampleInOut.toResponseMessage
- assert(m === CamelMessage("test-out", Map("key-out" -> "val-out")))
+ assert(m === CamelMessage("test-out", Map("key-out" → "val-out")))
}
test("mustCreateFailureMessageFromExceptionAndInMessage") {
@@ -82,30 +82,30 @@ class CamelExchangeAdapterTest extends FunSuite with SharedCamelSystem {
}
test("mustCreateRequestMessageFromInMessageWithAdditionalHeader") {
- val m = sampleInOnly.toRequestMessage(Map("x" -> "y"))
- assert(m === CamelMessage("test-in", Map("key-in" -> "val-in", "x" -> "y")))
+ val m = sampleInOnly.toRequestMessage(Map("x" → "y"))
+ assert(m === CamelMessage("test-in", Map("key-in" → "val-in", "x" → "y")))
}
test("mustCreateResponseMessageFromInMessageWithAdditionalHeader") {
- val m = sampleInOnly.toResponseMessage(Map("x" -> "y"))
- assert(m === CamelMessage("test-in", Map("key-in" -> "val-in", "x" -> "y")))
+ val m = sampleInOnly.toResponseMessage(Map("x" → "y"))
+ assert(m === CamelMessage("test-in", Map("key-in" → "val-in", "x" → "y")))
}
test("mustCreateResponseMessageFromOutMessageWithAdditionalHeader") {
- val m = sampleInOut.toResponseMessage(Map("x" -> "y"))
- assert(m === CamelMessage("test-out", Map("key-out" -> "val-out", "x" -> "y")))
+ val m = sampleInOut.toResponseMessage(Map("x" → "y"))
+ assert(m === CamelMessage("test-out", Map("key-out" → "val-out", "x" → "y")))
}
test("mustCreateFailureMessageFromExceptionAndInMessageWithAdditionalHeader") {
val e1 = sampleInOnly
e1.setException(new Exception("test1"))
assert(e1.toAkkaCamelException.getMessage === "test1")
- val headers = e1.toAkkaCamelException(Map("x" -> "y")).headers
+ val headers = e1.toAkkaCamelException(Map("x" → "y")).headers
assert(headers("key-in") === "val-in")
assert(headers("x") === "y")
assert(e1.toFailureMessage.cause.getMessage === "test1")
- val failureHeaders = e1.toFailureResult(Map("x" -> "y")).headers
+ val failureHeaders = e1.toFailureResult(Map("x" → "y")).headers
assert(failureHeaders("key-in") === "val-in")
assert(failureHeaders("x") === "y")
@@ -115,11 +115,11 @@ class CamelExchangeAdapterTest extends FunSuite with SharedCamelSystem {
val e1 = sampleInOut
e1.setException(new Exception("test2"))
assert(e1.toAkkaCamelException.getMessage === "test2")
- val headers = e1.toAkkaCamelException(Map("x" -> "y")).headers
+ val headers = e1.toAkkaCamelException(Map("x" → "y")).headers
assert(headers("key-out") === "val-out")
assert(headers("x") === "y")
assert(e1.toFailureMessage.cause.getMessage === "test2")
- val failureHeaders = e1.toFailureResult(Map("x" -> "y")).headers
+ val failureHeaders = e1.toFailureResult(Map("x" → "y")).headers
assert(failureHeaders("key-out") === "val-out")
assert(failureHeaders("x") === "y")
}
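// Aside (not part of the patch): the arrow substitutions throughout these Camel test
// hunks are purely notational — scala.Predef.ArrowAssoc defines `→` (U+2192) as an
// alias of `->`, so both build the same Tuple2:
object ArrowSketch extends App {
  val ascii = "key" -> 1
  val unicode = "key" → 1
  assert(ascii == unicode) // identical pairs; only the glyph differs
}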
diff --git a/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala b/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala
index f79ba3ccd7..7bff0f7a4f 100644
--- a/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala
@@ -25,7 +25,7 @@ class CamelMessageTest extends Matchers with WordSpecLike with SharedCamelSystem
message.setExchange(new DefaultExchange(camel.context))
val attachmentToAdd = new DataHandler(new URL("https://another.url"))
- CamelMessage.copyContent(new CamelMessage("body", Map("key" -> "baz"), Map("key" -> attachmentToAdd)), message)
+ CamelMessage.copyContent(new CamelMessage("body", Map("key" → "baz"), Map("key" → attachmentToAdd)), message)
assert(message.getBody === "body")
assert(message.getHeader("foo") === "bar")
diff --git a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala
index c48562ba0a..b8edf30fed 100644
--- a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala
@@ -67,8 +67,8 @@ class ConcurrentActivationTest extends WordSpec with Matchers with NonSharedCame
}
val (activatedConsumerNames, activatedProducerNames) = partitionNames(activations)
val (deactivatedConsumerNames, deactivatedProducerNames) = partitionNames(deactivations)
- assertContainsSameElements(activatedConsumerNames -> deactivatedConsumerNames)
- assertContainsSameElements(activatedProducerNames -> deactivatedProducerNames)
+ assertContainsSameElements(activatedConsumerNames → deactivatedConsumerNames)
+ assertContainsSameElements(activatedProducerNames → deactivatedProducerNames)
} finally {
system.eventStream.publish(TestEvent.UnMute(eventFilter))
}
@@ -95,7 +95,7 @@ class ConsumerBroadcast(promise: Promise[(Future[List[List[ActorRef]]], Future[L
val routee = context.actorOf(Props(classOf[Registrar], i, number, activationListPromise, deactivationListPromise), "registrar-" + i)
routee.path.toString
}
- promise.success(Future.sequence(allActivationFutures) -> Future.sequence(allDeactivationFutures))
+ promise.success(Future.sequence(allActivationFutures) → Future.sequence(allDeactivationFutures))
broadcaster = Some(context.actorOf(BroadcastGroup(routeePaths).props(), "registrarRouter"))
case reg: Any ⇒
diff --git a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala
index 85fc931617..3c37885019 100644
--- a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala
@@ -24,31 +24,31 @@ class MessageScalaTest extends FunSuite with Matchers with SharedCamelSystem {
}
test("mustConvertDoubleHeaderToString") {
- val message = CamelMessage("test", Map("test" -> 1.4))
+ val message = CamelMessage("test", Map("test" → 1.4))
message.headerAs[String]("test").get should ===("1.4")
}
test("mustReturnSubsetOfHeaders") {
- val message = CamelMessage("test", Map("A" -> "1", "B" -> "2"))
- message.headers(Set("B")) should ===(Map("B" -> "2"))
+ val message = CamelMessage("test", Map("A" → "1", "B" → "2"))
+ message.headers(Set("B")) should ===(Map("B" → "2"))
}
test("mustTransformBodyAndPreserveHeaders") {
- CamelMessage("a", Map("A" -> "1")).mapBody((body: String) ⇒ body + "b") should ===(CamelMessage("ab", Map("A" -> "1")))
+ CamelMessage("a", Map("A" → "1")).mapBody((body: String) ⇒ body + "b") should ===(CamelMessage("ab", Map("A" → "1")))
}
test("mustConvertBodyAndPreserveHeaders") {
- CamelMessage(1.4, Map("A" -> "1")).withBodyAs[String] should ===(CamelMessage("1.4", Map("A" -> "1")))
+ CamelMessage(1.4, Map("A" → "1")).withBodyAs[String] should ===(CamelMessage("1.4", Map("A" → "1")))
}
test("mustSetBodyAndPreserveHeaders") {
- CamelMessage("test1", Map("A" -> "1")).copy(body = "test2") should ===(
- CamelMessage("test2", Map("A" -> "1")))
+ CamelMessage("test1", Map("A" → "1")).copy(body = "test2") should ===(
+ CamelMessage("test2", Map("A" → "1")))
}
test("mustSetHeadersAndPreserveBody") {
- CamelMessage("test1", Map("A" -> "1")).copy(headers = Map("C" -> "3")) should ===(
- CamelMessage("test1", Map("C" -> "3")))
+ CamelMessage("test1", Map("A" → "1")).copy(headers = Map("C" → "3")) should ===(
+ CamelMessage("test1", Map("C" → "3")))
}
test("mustBeAbleToReReadStreamCacheBody") {
diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala
index 6106ff7f37..7dfeb9127b 100644
--- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala
@@ -45,9 +45,9 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk
"01 produce a message and receive normal response" in {
val producer = system.actorOf(Props(new TestProducer("direct:producer-test-2", true)), name = "01-direct-producer-2")
- val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123"))
producer.tell(message, testActor)
- expectMsg(CamelMessage("received TEST", Map(CamelMessage.MessageExchangeId -> "123")))
+ expectMsg(CamelMessage("received TEST", Map(CamelMessage.MessageExchangeId → "123")))
}
"02 produce a message and receive failure response" in {
@@ -72,13 +72,13 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk
supervisor.tell(Props(new TestProducer("direct:producer-test-2")), testActor)
val producer = receiveOne(timeoutDuration).asInstanceOf[ActorRef]
- val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
producer.tell(message, testActor)
expectMsgPF(timeoutDuration) {
case Failure(e: AkkaCamelException) ⇒
e.getMessage should ===("failure")
- e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123"))
+ e.headers should ===(Map(CamelMessage.MessageExchangeId → "123"))
}
}
Await.ready(latch, timeoutDuration)
@@ -106,21 +106,21 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk
"10 produce message to direct:producer-test-3 and receive normal response" in {
val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "10-direct-producer-test-3")
- val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123"))
producer.tell(message, testActor)
- expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123")))
+ expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123")))
}
"11 produce message to direct:producer-test-3 and receive failure response" in {
val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "11-direct-producer-test-3-receive-failure")
- val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
producer.tell(message, testActor)
expectMsgPF(timeoutDuration) {
case Failure(e: AkkaCamelException) ⇒
e.getMessage should ===("failure")
- e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123"))
+ e.headers should ===(Map(CamelMessage.MessageExchangeId → "123"))
}
}
}
@@ -128,22 +128,22 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk
"12 produce message, forward normal response of direct:producer-test-2 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "12-reply-forwarding-target")
val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "12-direct-producer-test-2-forwarder")
- val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123"))
producer.tell(message, testActor)
- expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result")))
+ expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123", "test" → "result")))
}
"13 produce message, forward failure response of direct:producer-test-2 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "13-reply-forwarding-target")
val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "13-direct-producer-test-2-forwarder-failure")
- val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
producer.tell(message, testActor)
expectMsgPF(timeoutDuration) {
case Failure(e: AkkaCamelException) ⇒
e.getMessage should ===("failure")
- e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure"))
+ e.headers should ===(Map(CamelMessage.MessageExchangeId → "123", "test" → "failure"))
}
}
}
@@ -170,23 +170,23 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk
"16 produce message, forward normal response from direct:producer-test-3 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "16-reply-forwarding-target")
val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "16-direct-producer-test-3-to-replying-actor")
- val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123"))
producer.tell(message, testActor)
- expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result")))
+ expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123", "test" → "result")))
}
"17 produce message, forward failure response from direct:producer-test-3 to a replying target actor and receive response" in {
val target = system.actorOf(Props[ReplyingForwardTarget], name = "17-reply-forwarding-target")
val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "17-direct-producer-test-3-forward-failure")
- val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
producer.tell(message, testActor)
expectMsgPF(timeoutDuration) {
case Failure(e: AkkaCamelException) ⇒
e.getMessage should ===("failure")
- e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure"))
+ e.headers should ===(Map(CamelMessage.MessageExchangeId → "123", "test" → "failure"))
}
}
}
@@ -324,10 +324,10 @@ object ProducerFeatureTest {
class ReplyingForwardTarget extends Actor {
def receive = {
case msg: CamelMessage ⇒
- context.sender() ! (msg.copy(headers = msg.headers + ("test" -> "result")))
+ context.sender() ! (msg.copy(headers = msg.headers + ("test" → "result")))
case msg: akka.actor.Status.Failure ⇒
msg.cause match {
- case e: AkkaCamelException ⇒ context.sender() ! Status.Failure(new AkkaCamelException(e, e.headers + ("test" -> "failure")))
+ case e: AkkaCamelException ⇒ context.sender() ! Status.Failure(new AkkaCamelException(e, e.headers + ("test" → "failure")))
}
}
}
diff --git a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala
index 0ce10b714e..4bd3d03b04 100644
--- a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala
+++ b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala
@@ -34,10 +34,10 @@ class UntypedProducerTest extends WordSpec with Matchers with BeforeAndAfterAll
"produce a message and receive a normal response" in {
val producer = system.actorOf(Props[SampleUntypedReplyingProducer], name = "sample-untyped-replying-producer")
- val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123"))
val future = producer.ask(message)(timeout)
- val expected = CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123"))
+ val expected = CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123"))
Await.result(future, timeout) match {
case result: CamelMessage ⇒ result should ===(expected)
case unexpected ⇒ fail("Actor responded with unexpected message:" + unexpected)
@@ -48,14 +48,14 @@ class UntypedProducerTest extends WordSpec with Matchers with BeforeAndAfterAll
"produce a message and receive a failure response" in {
val producer = system.actorOf(Props[SampleUntypedReplyingProducer], name = "sample-untyped-replying-producer-failure")
- val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123"))
+ val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123"))
filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) {
val future = producer.ask(message)(timeout).failed
Await.result(future, timeout) match {
case e: AkkaCamelException ⇒
e.getMessage should ===("failure")
- e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123"))
+ e.headers should ===(Map(CamelMessage.MessageExchangeId → "123"))
case unexpected ⇒ fail("Actor responded with unexpected message:" + unexpected)
}
}
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
index 0b77b79e6d..6ee75e16f9 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
@@ -153,13 +153,15 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging {
/**
* Start periodic gossip to random nodes in cluster
*/
- val gossipTask = scheduler.schedule(PeriodicTasksInitialDelay max CollectorGossipInterval,
+ val gossipTask = scheduler.schedule(
+ PeriodicTasksInitialDelay max CollectorGossipInterval,
CollectorGossipInterval, self, GossipTick)
/**
* Start periodic metrics collection
*/
- val sampleTask = scheduler.schedule(PeriodicTasksInitialDelay max CollectorSampleInterval,
+ val sampleTask = scheduler.schedule(
+ PeriodicTasksInitialDelay max CollectorSampleInterval,
CollectorSampleInterval, self, MetricsTick)
override def preStart(): Unit = {
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
index 02c818cf30..9361a4be07 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
@@ -44,7 +44,7 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension {
* Supervision strategy.
*/
private[metrics] val strategy = system.dynamicAccess.createInstanceFor[SupervisorStrategy](
- SupervisorStrategyProvider, immutable.Seq(classOf[Config] -> SupervisorStrategyConfiguration))
+ SupervisorStrategyProvider, immutable.Seq(classOf[Config] → SupervisorStrategyConfiguration))
.getOrElse {
val log: LoggingAdapter = Logging(system, getClass.getName)
log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[ClusterMetricsStrategy].getName}.")
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
index 9e80a1f649..e80d46be40 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
@@ -120,15 +120,16 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS
*/
@SerialVersionUID(1L)
final case class AdaptiveLoadBalancingPool(
- metricsSelector: MetricsSelector = MixMetricsSelector,
- override val nrOfInstances: Int = 0,
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ override val nrOfInstances: Int = 0,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool {
def this(config: Config, dynamicAccess: DynamicAccess) =
- this(nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
+ this(
+ nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
usePoolDispatcher = config.hasPath("pool-dispatcher"))
@@ -148,7 +149,8 @@ final case class AdaptiveLoadBalancingPool(
new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector))
override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
- Some(Props(classOf[AdaptiveLoadBalancingMetricsListener],
+ Some(Props(
+ classOf[AdaptiveLoadBalancingMetricsListener],
routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
/**
@@ -200,13 +202,14 @@ final case class AdaptiveLoadBalancingPool(
*/
@SerialVersionUID(1L)
final case class AdaptiveLoadBalancingGroup(
- metricsSelector: MetricsSelector = MixMetricsSelector,
- override val paths: immutable.Iterable[String] = Nil,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ override val paths: immutable.Iterable[String] = Nil,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config, dynamicAccess: DynamicAccess) =
- this(metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
+ this(
+ metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
paths = immutableSeq(config.getStringList("routees.paths")))
/**
@@ -216,8 +219,9 @@ final case class AdaptiveLoadBalancingGroup(
* @param routeesPaths string representation of the actor paths of the routees, messages are
* sent with [[akka.actor.ActorSelection]] to these paths
*/
- def this(metricsSelector: MetricsSelector,
- routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths))
+ def this(
+ metricsSelector: MetricsSelector,
+ routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths))
override def paths(system: ActorSystem): immutable.Iterable[String] = this.paths
@@ -225,7 +229,8 @@ final case class AdaptiveLoadBalancingGroup(
new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector))
override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
- Some(Props(classOf[AdaptiveLoadBalancingMetricsListener],
+ Some(Props(
+ classOf[AdaptiveLoadBalancingMetricsListener],
routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
/**
@@ -365,9 +370,9 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe
combined.foldLeft(Map.empty[Address, (Double, Int)].withDefaultValue((0.0, 0))) {
case (acc, (address, capacity)) ⇒
val (sum, count) = acc(address)
- acc + (address -> ((sum + capacity, count + 1)))
+ acc + (address → ((sum + capacity, count + 1)))
}.map {
- case (addr, (sum, count)) ⇒ (addr -> sum / count)
+ case (addr, (sum, count)) ⇒ addr → (sum / count)
}
}
@@ -381,7 +386,7 @@ object MetricsSelector {
case "cpu" ⇒ CpuMetricsSelector
case "load" ⇒ SystemLoadAverageMetricsSelector
case fqn ⇒
- val args = List(classOf[Config] -> config)
+ val args = List(classOf[Config] → config)
dynamicAccess.createInstanceFor[MetricsSelector](fqn, args).recover({
case exception ⇒ throw new IllegalArgumentException(
(s"Cannot instantiate metrics-selector [$fqn], " +
@@ -429,7 +434,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector {
val (_, min) = capacity.minBy { case (_, c) ⇒ c }
// lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero
val divisor = math.max(0.01, min)
- capacity map { case (addr, c) ⇒ (addr -> math.round((c) / divisor).toInt) }
+ capacity map { case (addr, c) ⇒ (addr → math.round((c) / divisor).toInt) }
}
}
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
index 3f8cf45ae9..dd0338ed14 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
@@ -207,12 +207,12 @@ object StandardMetrics {
*/
@SerialVersionUID(1L)
final case class Cpu(
- address: Address,
- timestamp: Long,
+ address: Address,
+ timestamp: Long,
systemLoadAverage: Option[Double],
- cpuCombined: Option[Double],
- cpuStolen: Option[Double],
- processors: Int) {
+ cpuCombined: Option[Double],
+ cpuStolen: Option[Double],
+ processors: Int) {
cpuCombined match {
case Some(x) ⇒ require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]")
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala
index f33f54d89f..81f46dd11e 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala
@@ -60,7 +60,7 @@ private[metrics] object MetricsCollector {
def create(provider: String) = TryNative {
log.debug(s"Trying ${provider}.")
system.asInstanceOf[ExtendedActorSystem].dynamicAccess
- .createInstanceFor[MetricsCollector](provider, List(classOf[ActorSystem] -> system)).get
+ .createInstanceFor[MetricsCollector](provider, List(classOf[ActorSystem] → system)).get
}
val collector = if (useCustom)
@@ -86,7 +86,8 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics
import StandardMetrics._
private def this(address: Address, settings: ClusterMetricsSettings) =
- this(address,
+ this(
+ address,
EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval))
/**
@@ -193,7 +194,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP
import org.hyperic.sigar.CpuPerc
def this(address: Address, settings: ClusterMetricsSettings, sigar: SigarProxy) =
- this(address,
+ this(
+ address,
EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval),
sigar)
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
index 208493c886..2a48613dc0 100644
--- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
@@ -177,7 +177,8 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS
case NumberType.Float_VALUE ⇒ jl.Float.intBitsToFloat(number.getValue32)
case NumberType.Integer_VALUE ⇒ number.getValue32
case NumberType.Serialized_VALUE ⇒
- val in = new ClassLoaderObjectInputStream(system.dynamicAccess.classLoader,
+ val in = new ClassLoaderObjectInputStream(
+ system.dynamicAccess.classLoader,
new ByteArrayInputStream(number.getSerialized.toByteArray))
val obj = in.readObject
in.close()
diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
index d87163eafd..3c2c8975eb 100644
--- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
+++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
@@ -122,11 +122,11 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa
Await.result(router ? GetRoutees, timeout.duration).asInstanceOf[Routees].routees
def receiveReplies(expectedReplies: Int): Map[Address, Int] = {
- val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0)
+ val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0)
(receiveWhile(5 seconds, messages = expectedReplies) {
case Reply(address) ⇒ address
}).foldLeft(zero) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
}
@@ -139,10 +139,11 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa
}
def startRouter(name: String): ActorRef = {
- val router = system.actorOf(ClusterRouterPool(
- local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
- settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
- props(Props[Echo]),
+ val router = system.actorOf(
+ ClusterRouterPool(
+ local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
+ settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
+ props(Props[Echo]),
name)
// it may take some time until router receives cluster member events
awaitAssert { currentRoutees(router).size should ===(roles.size) }
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
index fa753c56b3..12f011aee8 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
@@ -62,15 +62,15 @@ class MetricsSelectorSpec extends WordSpec with Matchers {
"CapacityMetricsSelector" must {
"calculate weights from capacity" in {
- val capacity = Map(a1 -> 0.6, b1 -> 0.3, c1 -> 0.1)
+ val capacity = Map(a1 → 0.6, b1 → 0.3, c1 → 0.1)
val weights = abstractSelector.weights(capacity)
- weights should ===(Map(c1 -> 1, b1 -> 3, a1 -> 6))
+ weights should ===(Map(c1 → 1, b1 → 3, a1 → 6))
}
"handle low and zero capacity" in {
- val capacity = Map(a1 -> 0.0, b1 -> 1.0, c1 -> 0.005, d1 -> 0.004)
+ val capacity = Map(a1 → 0.0, b1 → 1.0, c1 → 0.005, d1 → 0.004)
val weights = abstractSelector.weights(capacity)
- weights should ===(Map(a1 -> 0, b1 -> 100, c1 -> 1, d1 -> 0))
+ weights should ===(Map(a1 → 0, b1 → 100, c1 → 1, d1 → 0))
}
}
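// Aside — a worked example (not part of the patch): with the divisor rule shown in
// CapacityMetricsSelector above, divisor = max(0.01, lowest capacity), the second test's
// input Map(a1 → 0.0, b1 → 1.0, c1 → 0.005, d1 → 0.004) gives divisor = 0.01, so
// round(0.0 / 0.01) = 0, round(1.0 / 0.01) = 100, round(0.5) = 1, round(0.4) = 0 —
// matching the expected Map(a1 → 0, b1 → 100, c1 → 1, d1 → 0).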
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala
index db0d06d2a1..2938213546 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala
@@ -91,7 +91,7 @@ class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollec
} else None
}
}
- streamingDataSet ++= changes.map(m ⇒ m.name -> m)
+ streamingDataSet ++= changes.map(m ⇒ m.name → m)
}
}
}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
index 425b484866..6f6689b3e2 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
@@ -49,11 +49,11 @@ case class SimpleSigarProvider(location: String = "native") extends SigarProvide
* Provide sigar library as static mock.
*/
case class MockitoSigarProvider(
- pid: Long = 123,
+ pid: Long = 123,
loadAverage: Array[Double] = Array(0.7, 0.3, 0.1),
- cpuCombined: Double = 0.5,
- cpuStolen: Double = 0.2,
- steps: Int = 5) extends SigarProvider with MockitoSugar {
+ cpuCombined: Double = 0.5,
+ cpuStolen: Double = 0.2,
+ steps: Int = 5) extends SigarProvider with MockitoSugar {
import org.hyperic.sigar._
import org.mockito.Mockito._
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
index 175977c15d..ffa044a193 100644
--- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
@@ -42,8 +42,10 @@ class MessageSerializerSpec extends AkkaSpec(
"be serializable" in {
- val metricsGossip = MetricsGossip(Set(NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))),
- NodeMetrics(b1.address, 4712, Set(Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))),
+ val metricsGossip = MetricsGossip(Set(
+ NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))),
+ NodeMetrics(b1.address, 4712, Set(
+ Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))),
Metric("bar1", Double.MinPositiveValue, None),
Metric("bar2", Float.MaxValue, None),
Metric("bar3", Int.MaxValue, None),
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala
index 2b9215496f..631d064939 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala
@@ -167,7 +167,8 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
}
private[akka] def requireClusterRole(role: Option[String]): Unit =
- require(role.forall(cluster.selfRoles.contains),
+ require(
+ role.forall(cluster.selfRoles.contains),
s"This cluster member [${cluster.selfAddress}] doesn't have the role [$role]")
/**
@@ -193,11 +194,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
def start(
- typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
allocationStrategy: ShardAllocationStrategy,
handOffStopMessage: Any): ActorRef = {
@@ -232,11 +233,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
def start(
- typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId): ActorRef = {
+ extractShardId: ShardRegion.ExtractShardId): ActorRef = {
val allocationStrategy = new LeastShardAllocationStrategy(
settings.tuningParameters.leastShardAllocationRebalanceThreshold,
@@ -265,18 +266,18 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
def start(
- typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
- messageExtractor: ShardRegion.MessageExtractor,
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ messageExtractor: ShardRegion.MessageExtractor,
allocationStrategy: ShardAllocationStrategy,
handOffStopMessage: Any): ActorRef = {
start(typeName, entityProps, settings,
extractEntityId = {
- case msg if messageExtractor.entityId(msg) ne null ⇒
- (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
- },
+ case msg if messageExtractor.entityId(msg) ne null ⇒
+ (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
+ },
extractShardId = msg ⇒ messageExtractor.shardId(msg),
allocationStrategy = allocationStrategy,
handOffStopMessage = handOffStopMessage)
@@ -301,9 +302,9 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
def start(
- typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
messageExtractor: ShardRegion.MessageExtractor): ActorRef = {
val allocationStrategy = new LeastShardAllocationStrategy(
@@ -333,10 +334,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
def startProxy(
- typeName: String,
- role: Option[String],
+ typeName: String,
+ role: Option[String],
extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId): ActorRef = {
+ extractShardId: ShardRegion.ExtractShardId): ActorRef = {
implicit val timeout = system.settings.CreationTimeout
val settings = ClusterShardingSettings(system).withRole(role)
@@ -363,15 +364,15 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard
*/
def startProxy(
- typeName: String,
- role: Optional[String],
+ typeName: String,
+ role: Optional[String],
messageExtractor: ShardRegion.MessageExtractor): ActorRef = {
startProxy(typeName, Option(role.orElse(null)),
extractEntityId = {
- case msg if messageExtractor.entityId(msg) ne null ⇒
- (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
- },
+ case msg if messageExtractor.entityId(msg) ne null ⇒
+ (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg))
+ },
extractShardId = msg ⇒ messageExtractor.shardId(msg))
}
@@ -443,21 +444,23 @@ private[akka] class ClusterShardingGuardian extends Actor {
randomFactor = 0.2).withDeploy(Deploy.local)
val singletonSettings = settings.coordinatorSingletonSettings
.withSingletonName("singleton").withRole(role)
- context.actorOf(ClusterSingletonManager.props(
- singletonProps,
- terminationMessage = PoisonPill,
- singletonSettings).withDispatcher(context.props.dispatcher),
+ context.actorOf(
+ ClusterSingletonManager.props(
+ singletonProps,
+ terminationMessage = PoisonPill,
+ singletonSettings).withDispatcher(context.props.dispatcher),
name = cName)
}
- context.actorOf(ShardRegion.props(
- typeName = typeName,
- entityProps = entityProps,
- settings = settings,
- coordinatorPath = cPath,
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- handOffStopMessage = handOffStopMessage).withDispatcher(context.props.dispatcher),
+ context.actorOf(
+ ShardRegion.props(
+ typeName = typeName,
+ entityProps = entityProps,
+ settings = settings,
+ coordinatorPath = cPath,
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ handOffStopMessage = handOffStopMessage).withDispatcher(context.props.dispatcher),
name = encName)
}
sender() ! Started(shardRegion)
@@ -467,12 +470,13 @@ private[akka] class ClusterShardingGuardian extends Actor {
val cName = coordinatorSingletonManagerName(encName)
val cPath = coordinatorPath(encName)
val shardRegion = context.child(encName).getOrElse {
- context.actorOf(ShardRegion.proxyProps(
- typeName = typeName,
- settings = settings,
- coordinatorPath = cPath,
- extractEntityId = extractEntityId,
- extractShardId = extractShardId).withDispatcher(context.props.dispatcher),
+ context.actorOf(
+ ShardRegion.proxyProps(
+ typeName = typeName,
+ settings = settings,
+ coordinatorPath = cPath,
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId).withDispatcher(context.props.dispatcher),
name = encName)
}
sender() ! Started(shardRegion)
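// Aside — an illustrative sketch (not part of the patch) of the five-argument start(...)
// overload realigned above; the entity protocol, actor, and shard count are made up, and
// a cluster-enabled actor provider is assumed in configuration:
import akka.actor.{ Actor, ActorSystem, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }

final case class Get(id: String) // hypothetical entity message
class Counter extends Actor { def receive = { case Get(_) ⇒ sender() ! 0 } }

object ShardingSketch extends App {
  val system = ActorSystem("sketch")
  val region = ClusterSharding(system).start(
    typeName = "Counter",
    entityProps = Props[Counter],
    settings = ClusterShardingSettings(system),
    extractEntityId = { case msg @ Get(id) ⇒ (id, msg) },
    extractShardId = { case Get(id) ⇒ (math.abs(id.hashCode) % 10).toString })
}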
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala
index 51597422b2..b77fb80eb8 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala
@@ -71,19 +71,19 @@ object ClusterShardingSettings {
if (role == "") None else Option(role)
class TuningParameters(
- val coordinatorFailureBackoff: FiniteDuration,
- val retryInterval: FiniteDuration,
- val bufferSize: Int,
- val handOffTimeout: FiniteDuration,
- val shardStartTimeout: FiniteDuration,
- val shardFailureBackoff: FiniteDuration,
- val entityRestartBackoff: FiniteDuration,
- val rebalanceInterval: FiniteDuration,
- val snapshotAfter: Int,
- val leastShardAllocationRebalanceThreshold: Int,
+ val coordinatorFailureBackoff: FiniteDuration,
+ val retryInterval: FiniteDuration,
+ val bufferSize: Int,
+ val handOffTimeout: FiniteDuration,
+ val shardStartTimeout: FiniteDuration,
+ val shardFailureBackoff: FiniteDuration,
+ val entityRestartBackoff: FiniteDuration,
+ val rebalanceInterval: FiniteDuration,
+ val snapshotAfter: Int,
+ val leastShardAllocationRebalanceThreshold: Int,
val leastShardAllocationMaxSimultaneousRebalance: Int,
- val waitingForStateTimeout: FiniteDuration,
- val updatingStateTimeout: FiniteDuration)
+ val waitingForStateTimeout: FiniteDuration,
+ val updatingStateTimeout: FiniteDuration)
}
/**
@@ -102,15 +102,16 @@ object ClusterShardingSettings {
* @param tuningParameters additional tuning parameters, see descriptions in reference.conf
*/
final class ClusterShardingSettings(
- val role: Option[String],
- val rememberEntities: Boolean,
- val journalPluginId: String,
- val snapshotPluginId: String,
- val stateStoreMode: String,
- val tuningParameters: ClusterShardingSettings.TuningParameters,
+ val role: Option[String],
+ val rememberEntities: Boolean,
+ val journalPluginId: String,
+ val snapshotPluginId: String,
+ val stateStoreMode: String,
+ val tuningParameters: ClusterShardingSettings.TuningParameters,
val coordinatorSingletonSettings: ClusterSingletonManagerSettings) extends NoSerializationVerificationNeeded {
- require(stateStoreMode == "persistence" || stateStoreMode == "ddata",
+ require(
+ stateStoreMode == "persistence" || stateStoreMode == "ddata",
s"Unknown 'state-store-mode' [$stateStoreMode], valid values are 'persistence' or 'ddata'")
def withRole(role: String): ClusterShardingSettings = copy(role = ClusterShardingSettings.roleOption(role))
@@ -139,13 +140,14 @@ final class ClusterShardingSettings(
def withCoordinatorSingletonSettings(coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings =
copy(coordinatorSingletonSettings = coordinatorSingletonSettings)
- private def copy(role: Option[String] = role,
- rememberEntities: Boolean = rememberEntities,
- journalPluginId: String = journalPluginId,
- snapshotPluginId: String = snapshotPluginId,
- stateStoreMode: String = stateStoreMode,
- tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters,
- coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings): ClusterShardingSettings =
+ private def copy(
+ role: Option[String] = role,
+ rememberEntities: Boolean = rememberEntities,
+ journalPluginId: String = journalPluginId,
+ snapshotPluginId: String = snapshotPluginId,
+ stateStoreMode: String = stateStoreMode,
+ tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters,
+ coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings): ClusterShardingSettings =
new ClusterShardingSettings(
role,
rememberEntities,
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala
index cced284598..25606fc982 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala
@@ -92,7 +92,8 @@ object RemoveInternalClusterShardingData {
}
val completion = Promise[Unit]()
- system.actorOf(props(journalPluginId, typeNames, completion, remove2dot3Data),
+ system.actorOf(
+ props(journalPluginId, typeNames, completion, remove2dot3Data),
name = "removeInternalClusterShardingData")
completion.future
}
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala
index fa029f5f91..e8381da165 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala
@@ -80,13 +80,14 @@ private[akka] object Shard {
* If `settings.rememberEntities` is enabled the `PersistentShard`
* subclass is used, otherwise `Shard`.
*/
- def props(typeName: String,
- shardId: ShardRegion.ShardId,
- entityProps: Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
- handOffStopMessage: Any): Props = {
+ def props(
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
+ handOffStopMessage: Any): Props = {
if (settings.rememberEntities)
Props(new PersistentShard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage))
.withDeploy(Deploy.local)
@@ -105,12 +106,12 @@ private[akka] object Shard {
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
private[akka] class Shard(
- typeName: String,
- shardId: ShardRegion.ShardId,
- entityProps: Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
handOffStopMessage: Any) extends Actor with ActorLogging {
import ShardRegion.{ handOffStopperProps, EntityId, Msg, Passivate, ShardInitialized }
@@ -301,12 +302,12 @@ private[akka] class Shard(
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
private[akka] class PersistentShard(
- typeName: String,
- shardId: ShardRegion.ShardId,
- entityProps: Props,
- settings: ClusterShardingSettings,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
+ typeName: String,
+ shardId: ShardRegion.ShardId,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
handOffStopMessage: Any) extends Shard(
typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage)
with PersistentActor with ActorLogging {
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
index f28031aaeb..370a635f1f 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala
@@ -43,7 +43,7 @@ object ShardCoordinator {
*/
private[akka] def props(typeName: String, settings: ClusterShardingSettings,
allocationStrategy: ShardAllocationStrategy,
- replicator: ActorRef): Props =
+ replicator: ActorRef): Props =
Props(new DDataShardCoordinator(typeName: String, settings, allocationStrategy, replicator)).withDeploy(Deploy.local)
/**
@@ -73,8 +73,9 @@ object ShardCoordinator {
* you should not include these in the returned set
* @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round
*/
- def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
- rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]]
+ def rebalance(
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
+ rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]]
}
/**
@@ -89,8 +90,9 @@ object ShardCoordinator {
allocateShard(requester, shardId, currentShardAllocations.asJava)
}
- override final def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
- rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
+ override final def rebalance(
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
+ rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
import scala.collection.JavaConverters._
implicit val ec = ExecutionContexts.sameThreadExecutionContext
rebalance(currentShardAllocations.asJava, rebalanceInProgress.asJava).map(_.asScala.toSet)
@@ -117,8 +119,9 @@ object ShardCoordinator {
* you should not include these in the returned set
* @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round
*/
- def rebalance(currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]],
- rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]]
+ def rebalance(
+ currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]],
+ rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]]
}
private val emptyRebalanceResult = Future.successful(Set.empty[ShardId])
@@ -141,8 +144,9 @@ object ShardCoordinator {
Future.successful(regionWithLeastShards)
}
- override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
- rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
+ override def rebalance(
+ currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
+ rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = {
if (rebalanceInProgress.size < maxSimultaneousRebalance) {
val (regionWithLeastShards, leastShards) = currentShardAllocations.minBy { case (_, v) ⇒ v.size }
val mostShards = currentShardAllocations.collect {
@@ -255,10 +259,10 @@ object ShardCoordinator {
// region for each shard
shards: Map[ShardId, ActorRef] = Map.empty,
// shards for each region
- regions: Map[ActorRef, Vector[ShardId]] = Map.empty,
- regionProxies: Set[ActorRef] = Set.empty,
- unallocatedShards: Set[ShardId] = Set.empty,
- rememberEntities: Boolean = false) extends ClusterShardingSerializable {
+ regions: Map[ActorRef, Vector[ShardId]] = Map.empty,
+ regionProxies: Set[ActorRef] = Set.empty,
+ unallocatedShards: Set[ShardId] = Set.empty,
+ rememberEntities: Boolean = false) extends ClusterShardingSerializable {
def withRememberEntities(enabled: Boolean): State = {
if (enabled)
@@ -550,7 +554,7 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti
implicit val timeout: Timeout = waitMax
Future.sequence(aliveRegions.map { regionActor ⇒
(regionActor ? ShardRegion.GetShardRegionStats).mapTo[ShardRegion.ShardRegionStats]
- .map(stats ⇒ regionActor -> stats)
+ .map(stats ⇒ regionActor → stats)
}).map { allRegionStats ⇒
ShardRegion.ClusterShardingStats(allRegionStats.map {
case (region, stats) ⇒
@@ -559,7 +563,7 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti
if (regionAddress.hasLocalScope && regionAddress.system == cluster.selfAddress.system) cluster.selfAddress
else regionAddress
- address -> stats
+ address → stats
}.toMap)
}.recover {
case x: AskTimeoutException ⇒ ShardRegion.ClusterShardingStats(Map.empty)
@@ -688,7 +692,8 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti
getShardHomeSender ! ShardHome(evt.shard, evt.region)
}
} else
- log.debug("Allocated region {} for shard [{}] is not (any longer) one of the registered regions: {}",
+ log.debug(
+ "Allocated region {} for shard [{}] is not (any longer) one of the registered regions: {}",
region, shard, state)
}
}
@@ -805,7 +810,7 @@ class PersistentShardCoordinator(typeName: String, settings: ClusterShardingSett
*/
class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,
allocationStrategy: ShardCoordinator.ShardAllocationStrategy,
- replicator: ActorRef)
+ replicator: ActorRef)
extends ShardCoordinator(typeName, settings, allocationStrategy) with Stash {
import ShardCoordinator.Internal._
import akka.cluster.ddata.Replicator.Update
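(The `ShardAllocationStrategy` contract whose signatures are reindented above can be implemented directly. A minimal sketch of a conforming strategy; the class name and its deliberately trivial policy are illustrative, not part of this change.)

    import scala.collection.immutable
    import scala.concurrent.Future
    import akka.actor.ActorRef
    import akka.cluster.sharding.ShardCoordinator
    import akka.cluster.sharding.ShardRegion.ShardId

    // Allocates each new shard to the region with the fewest shards, never rebalances.
    class StickyAllocationStrategy extends ShardCoordinator.ShardAllocationStrategy {
      override def allocateShard(
        requester: ActorRef,
        shardId: ShardId,
        currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef] =
        Future.successful(currentShardAllocations.minBy { case (_, shards) ⇒ shards.size }._1)

      override def rebalance(
        currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]],
        rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] =
        Future.successful(Set.empty[ShardId])
    }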
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala
index 96f04799cd..c73eeb8f64 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala
@@ -29,12 +29,12 @@ object ShardRegion {
* Factory method for the [[akka.actor.Props]] of the [[ShardRegion]] actor.
*/
private[akka] def props(
- typeName: String,
- entityProps: Props,
- settings: ClusterShardingSettings,
- coordinatorPath: String,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
+ typeName: String,
+ entityProps: Props,
+ settings: ClusterShardingSettings,
+ coordinatorPath: String,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
handOffStopMessage: Any): Props =
Props(new ShardRegion(typeName, Some(entityProps), settings, coordinatorPath, extractEntityId,
extractShardId, handOffStopMessage)).withDeploy(Deploy.local)
@@ -45,11 +45,11 @@ object ShardRegion {
* when using it in proxy only mode.
*/
private[akka] def proxyProps(
- typeName: String,
- settings: ClusterShardingSettings,
+ typeName: String,
+ settings: ClusterShardingSettings,
coordinatorPath: String,
extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId): Props =
+ extractShardId: ShardRegion.ExtractShardId): Props =
Props(new ShardRegion(typeName, None, settings, coordinatorPath, extractEntityId, extractShardId, PoisonPill))
.withDeploy(Deploy.local)
@@ -337,12 +337,12 @@ object ShardRegion {
* @see [[ClusterSharding$ ClusterSharding extension]]
*/
class ShardRegion(
- typeName: String,
- entityProps: Option[Props],
- settings: ClusterShardingSettings,
- coordinatorPath: String,
- extractEntityId: ShardRegion.ExtractEntityId,
- extractShardId: ShardRegion.ExtractShardId,
+ typeName: String,
+ entityProps: Option[Props],
+ settings: ClusterShardingSettings,
+ coordinatorPath: String,
+ extractEntityId: ShardRegion.ExtractEntityId,
+ extractShardId: ShardRegion.ExtractShardId,
handOffStopMessage: Any) extends Actor with ActorLogging {
import ShardCoordinator.Internal._
@@ -609,7 +609,8 @@ class ShardRegion(
def register(): Unit = {
coordinatorSelection.foreach(_ ! registrationMessage)
if (shardBuffers.nonEmpty && retryCount >= 5)
- log.warning("Trying to register to coordinator at [{}], but no acknowledgement. Total [{}] buffered messages.",
+ log.warning(
+ "Trying to register to coordinator at [{}], but no acknowledgement. Total [{}] buffered messages.",
coordinatorSelection, totalBufferSize)
}
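(`props` and `proxyProps` are internal factories; applications reach them through the `ClusterSharding` extension. A minimal sketch of that public entry point, assuming an application-defined `Counter` entity actor and an in-scope `system`.)

    import akka.actor.{ ActorRef, Props }
    import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }

    // Messages are (entityId, payload) pairs in this sketch.
    val extractEntityId: ShardRegion.ExtractEntityId = {
      case msg @ (id: String, _) ⇒ (id, msg)
    }
    val extractShardId: ShardRegion.ExtractShardId = {
      case (id: String, _) ⇒ (math.abs(id.hashCode) % 10).toString
    }

    val region: ActorRef = ClusterSharding(system).start(
      typeName = "counter",
      entityProps = Props[Counter],
      settings = ClusterShardingSettings(system),
      extractEntityId = extractEntityId,
      extractShardId = extractShardId)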
diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala
index e9071c6579..48d297de0a 100644
--- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala
+++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala
@@ -64,33 +64,33 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy
private val ShardStatsManifest = "DB"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
- EntityStateManifest -> entityStateFromBinary,
- EntityStartedManifest -> entityStartedFromBinary,
- EntityStoppedManifest -> entityStoppedFromBinary,
+ EntityStateManifest → entityStateFromBinary,
+ EntityStartedManifest → entityStartedFromBinary,
+ EntityStoppedManifest → entityStoppedFromBinary,
- CoordinatorStateManifest -> coordinatorStateFromBinary,
- ShardRegionRegisteredManifest -> { bytes ⇒ ShardRegionRegistered(actorRefMessageFromBinary(bytes)) },
- ShardRegionProxyRegisteredManifest -> { bytes ⇒ ShardRegionProxyRegistered(actorRefMessageFromBinary(bytes)) },
- ShardRegionTerminatedManifest -> { bytes ⇒ ShardRegionTerminated(actorRefMessageFromBinary(bytes)) },
- ShardRegionProxyTerminatedManifest -> { bytes ⇒ ShardRegionProxyTerminated(actorRefMessageFromBinary(bytes)) },
- ShardHomeAllocatedManifest -> shardHomeAllocatedFromBinary,
- ShardHomeDeallocatedManifest -> { bytes ⇒ ShardHomeDeallocated(shardIdMessageFromBinary(bytes)) },
+ CoordinatorStateManifest → coordinatorStateFromBinary,
+ ShardRegionRegisteredManifest → { bytes ⇒ ShardRegionRegistered(actorRefMessageFromBinary(bytes)) },
+ ShardRegionProxyRegisteredManifest → { bytes ⇒ ShardRegionProxyRegistered(actorRefMessageFromBinary(bytes)) },
+ ShardRegionTerminatedManifest → { bytes ⇒ ShardRegionTerminated(actorRefMessageFromBinary(bytes)) },
+ ShardRegionProxyTerminatedManifest → { bytes ⇒ ShardRegionProxyTerminated(actorRefMessageFromBinary(bytes)) },
+ ShardHomeAllocatedManifest → shardHomeAllocatedFromBinary,
+ ShardHomeDeallocatedManifest → { bytes ⇒ ShardHomeDeallocated(shardIdMessageFromBinary(bytes)) },
- RegisterManifest -> { bytes ⇒ Register(actorRefMessageFromBinary(bytes)) },
- RegisterProxyManifest -> { bytes ⇒ RegisterProxy(actorRefMessageFromBinary(bytes)) },
- RegisterAckManifest -> { bytes ⇒ RegisterAck(actorRefMessageFromBinary(bytes)) },
- GetShardHomeManifest -> { bytes ⇒ GetShardHome(shardIdMessageFromBinary(bytes)) },
- ShardHomeManifest -> shardHomeFromBinary,
- HostShardManifest -> { bytes ⇒ HostShard(shardIdMessageFromBinary(bytes)) },
- ShardStartedManifest -> { bytes ⇒ ShardStarted(shardIdMessageFromBinary(bytes)) },
- BeginHandOffManifest -> { bytes ⇒ BeginHandOff(shardIdMessageFromBinary(bytes)) },
- BeginHandOffAckManifest -> { bytes ⇒ BeginHandOffAck(shardIdMessageFromBinary(bytes)) },
- HandOffManifest -> { bytes ⇒ HandOff(shardIdMessageFromBinary(bytes)) },
- ShardStoppedManifest -> { bytes ⇒ ShardStopped(shardIdMessageFromBinary(bytes)) },
- GracefulShutdownReqManifest -> { bytes ⇒ GracefulShutdownReq(actorRefMessageFromBinary(bytes)) },
+ RegisterManifest → { bytes ⇒ Register(actorRefMessageFromBinary(bytes)) },
+ RegisterProxyManifest → { bytes ⇒ RegisterProxy(actorRefMessageFromBinary(bytes)) },
+ RegisterAckManifest → { bytes ⇒ RegisterAck(actorRefMessageFromBinary(bytes)) },
+ GetShardHomeManifest → { bytes ⇒ GetShardHome(shardIdMessageFromBinary(bytes)) },
+ ShardHomeManifest → shardHomeFromBinary,
+ HostShardManifest → { bytes ⇒ HostShard(shardIdMessageFromBinary(bytes)) },
+ ShardStartedManifest → { bytes ⇒ ShardStarted(shardIdMessageFromBinary(bytes)) },
+ BeginHandOffManifest → { bytes ⇒ BeginHandOff(shardIdMessageFromBinary(bytes)) },
+ BeginHandOffAckManifest → { bytes ⇒ BeginHandOffAck(shardIdMessageFromBinary(bytes)) },
+ HandOffManifest → { bytes ⇒ HandOff(shardIdMessageFromBinary(bytes)) },
+ ShardStoppedManifest → { bytes ⇒ ShardStopped(shardIdMessageFromBinary(bytes)) },
+ GracefulShutdownReqManifest → { bytes ⇒ GracefulShutdownReq(actorRefMessageFromBinary(bytes)) },
- GetShardStatsManifest -> { bytes ⇒ GetShardStats },
- ShardStatsManifest -> { bytes ⇒ shardStatsFromBinary(bytes) })
+ GetShardStatsManifest → { bytes ⇒ GetShardStats },
+ ShardStatsManifest → { bytes ⇒ shardStatsFromBinary(bytes) })
override def manifest(obj: AnyRef): String = obj match {
case _: EntityState ⇒ EntityStateManifest
@@ -194,11 +194,11 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy
private def coordinatorStateFromProto(state: sm.CoordinatorState): State = {
val shards: Map[String, ActorRef] =
state.getShardsList.asScala.toVector.map { entry ⇒
- entry.getShardId -> resolveActorRef(entry.getRegionRef)
+ entry.getShardId → resolveActorRef(entry.getRegionRef)
}(breakOut)
val regionsZero: Map[ActorRef, Vector[String]] =
- state.getRegionsList.asScala.toVector.map(resolveActorRef(_) -> Vector.empty[String])(breakOut)
+ state.getRegionsList.asScala.toVector.map(resolveActorRef(_) → Vector.empty[String])(breakOut)
val regions: Map[ActorRef, Vector[String]] =
shards.foldLeft(regionsZero) { case (acc, (shardId, regionRef)) ⇒ acc.updated(regionRef, acc(regionRef) :+ shardId) }
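(The manifest-keyed `HashMap[String, Array[Byte] ⇒ AnyRef]` dispatch used by this serializer is a reusable pattern for any `SerializerWithStringManifest`. A self-contained sketch; `Ping`/`Pong` and the identifier are illustrative.)

    import akka.serialization.SerializerWithStringManifest

    final case class Ping(msg: String)
    case object Pong

    class PingPongSerializer extends SerializerWithStringManifest {
      private val PingManifest = "A"
      private val PongManifest = "B"

      // Same dispatch pattern as above: manifest string to deserializer function.
      private val fromBinaryMap = Map[String, Array[Byte] ⇒ AnyRef](
        PingManifest → { bytes ⇒ Ping(new String(bytes, "UTF-8")) },
        PongManifest → { _ ⇒ Pong })

      override def identifier: Int = 99999

      override def manifest(obj: AnyRef): String = obj match {
        case _: Ping ⇒ PingManifest
        case Pong    ⇒ PongManifest
      }

      override def toBinary(obj: AnyRef): Array[Byte] = obj match {
        case Ping(msg) ⇒ msg.getBytes("UTF-8")
        case Pong      ⇒ Array.empty[Byte]
      }

      override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
        fromBinaryMap.get(manifest) match {
          case Some(f) ⇒ f(bytes)
          case None    ⇒ throw new IllegalArgumentException(s"Unknown manifest [$manifest]")
        }
    }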
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala
index ff357a6484..fb88783782 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala
@@ -177,7 +177,7 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf
val locations = (for (n ← 1 to 10) yield {
val id = n.toString
region ! Ping(id)
- id -> expectMsgType[ActorRef]
+ id → expectMsgType[ActorRef]
}).toMap
shardLocations ! Locations(locations)
}
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
index 8ebae74b5e..0788c895cb 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala
@@ -256,10 +256,11 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
minBackoff = 5.seconds,
maxBackoff = 5.seconds,
randomFactor = 0.1).withDeploy(Deploy.local)
- system.actorOf(ClusterSingletonManager.props(
- singletonProps,
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ system.actorOf(
+ ClusterSingletonManager.props(
+ singletonProps,
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system)),
name = typeName + "Coordinator")
}
}
@@ -273,14 +274,15 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
""").withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
val settings = ClusterShardingSettings(cfg)
.withRememberEntities(rememberEntities)
- system.actorOf(ShardRegion.props(
- typeName = typeName,
- entityProps = qualifiedCounterProps(typeName),
- settings = settings,
- coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator",
- extractEntityId = extractEntityId,
- extractShardId = extractShardId,
- handOffStopMessage = PoisonPill),
+ system.actorOf(
+ ShardRegion.props(
+ typeName = typeName,
+ entityProps = qualifiedCounterProps(typeName),
+ settings = settings,
+ coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator",
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId,
+ handOffStopMessage = PoisonPill),
name = typeName + "Region")
}
@@ -398,12 +400,13 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
buffer-size = 1000
""").withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
val settings = ClusterShardingSettings(cfg)
- val proxy = system.actorOf(ShardRegion.proxyProps(
- typeName = "counter",
- settings,
- coordinatorPath = "/user/counterCoordinator/singleton/coordinator",
- extractEntityId = extractEntityId,
- extractShardId = extractShardId),
+ val proxy = system.actorOf(
+ ShardRegion.proxyProps(
+ typeName = "counter",
+ settings,
+ coordinatorPath = "/user/counterCoordinator/singleton/coordinator",
+ extractEntityId = extractEntityId,
+ extractShardId = extractShardId),
name = "regionProxy")
proxy ! Get(1)
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala
index ecb2fbc6a5..8717e9eb22 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala
@@ -19,13 +19,13 @@ class LeastShardAllocationStrategySpec extends AkkaSpec {
"LeastShardAllocationStrategy" must {
"allocate to region with least number of shards" in {
- val allocations = Map(regionA -> Vector("shard1"), regionB -> Vector("shard2"), regionC -> Vector.empty)
+ val allocations = Map(regionA → Vector("shard1"), regionB → Vector("shard2"), regionC → Vector.empty)
Await.result(allocationStrategy.allocateShard(regionA, "shard3", allocations), 3.seconds) should ===(regionC)
}
"rebalance from region with most number of shards" in {
- val allocations = Map(regionA -> Vector("shard1"), regionB -> Vector("shard2", "shard3"),
- regionC -> Vector.empty)
+ val allocations = Map(regionA → Vector("shard1"), regionB → Vector("shard2", "shard3"),
+ regionC → Vector.empty)
// so far regionB has 2 shards and regionC has 0 shards, but the diff is less than rebalanceThreshold
Await.result(allocationStrategy.rebalance(allocations, Set.empty), 3.seconds) should ===(Set.empty[String])
@@ -39,8 +39,9 @@ class LeastShardAllocationStrategySpec extends AkkaSpec {
}
"must limit number of simultanious rebalance" in {
- val allocations = Map(regionA -> Vector("shard1"),
- regionB -> Vector("shard2", "shard3", "shard4", "shard5", "shard6"), regionC -> Vector.empty)
+ val allocations = Map(
+ regionA → Vector("shard1"),
+ regionB → Vector("shard2", "shard3", "shard4", "shard5", "shard6"), regionC → Vector.empty)
Await.result(allocationStrategy.rebalance(allocations, Set("shard2")), 3.seconds) should ===(Set("shard3"))
Await.result(allocationStrategy.rebalance(allocations, Set("shard2", "shard3")), 3.seconds) should ===(Set.empty[String])
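(As the comments in this spec note, rebalancing starts only once the shard-count gap between the most and least loaded regions reaches the threshold, and at most `maxSimultaneousRebalance` shards migrate per round. A sketch of constructing the strategy with explicit values; the numbers are illustrative.)

    import akka.cluster.sharding.ShardCoordinator.LeastShardAllocationStrategy

    // rebalanceThreshold: minimum shard-count difference before rebalancing kicks in.
    // maxSimultaneousRebalance: upper bound on in-flight shard migrations.
    val allocationStrategy = new LeastShardAllocationStrategy(
      rebalanceThreshold = 3,
      maxSimultaneousRebalance = 2)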
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala
index 45ef2b31d1..fc4b725481 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala
@@ -30,8 +30,8 @@ class ClusterShardingMessageSerializerSpec extends AkkaSpec {
"be able to serializable ShardCoordinator snapshot State" in {
val state = State(
- shards = Map("a" -> region1, "b" -> region2, "c" -> region2),
- regions = Map(region1 -> Vector("a"), region2 -> Vector("b", "c"), region3 -> Vector.empty[String]),
+ shards = Map("a" → region1, "b" → region2, "c" → region2),
+ regions = Map(region1 → Vector("a"), region2 → Vector("b", "c"), region3 → Vector.empty[String]),
regionProxies = Set(regionProxy1, regionProxy2),
unallocatedShards = Set("d"))
checkSerialization(state)
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
index 54062f2846..17d7da0d17 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
@@ -108,13 +108,13 @@ object ClusterClientSettings {
* external service registry
*/
final class ClusterClientSettings(
- val initialContacts: Set[ActorPath],
+ val initialContacts: Set[ActorPath],
val establishingGetContactsInterval: FiniteDuration,
- val refreshContactsInterval: FiniteDuration,
- val heartbeatInterval: FiniteDuration,
- val acceptableHeartbeatPause: FiniteDuration,
- val bufferSize: Int,
- val reconnectTimeout: Option[FiniteDuration]) extends NoSerializationVerificationNeeded {
+ val refreshContactsInterval: FiniteDuration,
+ val heartbeatInterval: FiniteDuration,
+ val acceptableHeartbeatPause: FiniteDuration,
+ val bufferSize: Int,
+ val reconnectTimeout: Option[FiniteDuration]) extends NoSerializationVerificationNeeded {
require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000")
@@ -122,12 +122,12 @@ final class ClusterClientSettings(
* For binary/source compatibility
*/
def this(
- initialContacts: Set[ActorPath],
+ initialContacts: Set[ActorPath],
establishingGetContactsInterval: FiniteDuration,
- refreshContactsInterval: FiniteDuration,
- heartbeatInterval: FiniteDuration,
- acceptableHeartbeatPause: FiniteDuration,
- bufferSize: Int) =
+ refreshContactsInterval: FiniteDuration,
+ heartbeatInterval: FiniteDuration,
+ acceptableHeartbeatPause: FiniteDuration,
+ bufferSize: Int) =
this(initialContacts, establishingGetContactsInterval, refreshContactsInterval, heartbeatInterval,
acceptableHeartbeatPause, bufferSize, None)
@@ -163,13 +163,13 @@ final class ClusterClientSettings(
copy(reconnectTimeout = reconnectTimeout)
private def copy(
- initialContacts: Set[ActorPath] = initialContacts,
- establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval,
- refreshContactsInterval: FiniteDuration = refreshContactsInterval,
- heartbeatInterval: FiniteDuration = heartbeatInterval,
- acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
- bufferSize: Int = bufferSize,
- reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings =
+ initialContacts: Set[ActorPath] = initialContacts,
+ establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval,
+ refreshContactsInterval: FiniteDuration = refreshContactsInterval,
+ heartbeatInterval: FiniteDuration = heartbeatInterval,
+ acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
+ bufferSize: Int = bufferSize,
+ reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings =
new ClusterClientSettings(initialContacts, establishingGetContactsInterval, refreshContactsInterval,
heartbeatInterval, acceptableHeartbeatPause, bufferSize, reconnectTimeout)
}
@@ -629,8 +629,8 @@ object ClusterReceptionistSettings {
* client will be stopped after this time of inactivity.
*/
final class ClusterReceptionistSettings(
- val role: Option[String],
- val numberOfContacts: Int,
+ val role: Option[String],
+ val numberOfContacts: Int,
val responseTunnelReceiveTimeout: FiniteDuration) extends NoSerializationVerificationNeeded {
def withRole(role: String): ClusterReceptionistSettings = copy(role = ClusterReceptionistSettings.roleOption(role))
@@ -644,7 +644,7 @@ final class ClusterReceptionistSettings(
copy(responseTunnelReceiveTimeout = responseTunnelReceiveTimeout)
def withHeartbeat(
- heartbeatInterval: FiniteDuration,
+ heartbeatInterval: FiniteDuration,
acceptableHeartbeatPause: FiniteDuration,
failureDetectionInterval: FiniteDuration): ClusterReceptionistSettings =
copy(
@@ -671,12 +671,12 @@ final class ClusterReceptionistSettings(
private var _failureDetectionInterval: FiniteDuration = 2.second
def this(
- role: Option[String],
- numberOfContacts: Int,
+ role: Option[String],
+ numberOfContacts: Int,
responseTunnelReceiveTimeout: FiniteDuration,
- heartbeatInterval: FiniteDuration,
- acceptableHeartbeatPause: FiniteDuration,
- failureDetectionInterval: FiniteDuration) = {
+ heartbeatInterval: FiniteDuration,
+ acceptableHeartbeatPause: FiniteDuration,
+ failureDetectionInterval: FiniteDuration) = {
this(role, numberOfContacts, responseTunnelReceiveTimeout)
this._heartbeatInterval = heartbeatInterval
this._acceptableHeartbeatPause = acceptableHeartbeatPause
@@ -686,12 +686,12 @@ final class ClusterReceptionistSettings(
// END BINARY COMPATIBILITY
private def copy(
- role: Option[String] = role,
- numberOfContacts: Int = numberOfContacts,
+ role: Option[String] = role,
+ numberOfContacts: Int = numberOfContacts,
responseTunnelReceiveTimeout: FiniteDuration = responseTunnelReceiveTimeout,
- heartbeatInterval: FiniteDuration = heartbeatInterval,
- acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
- failureDetectionInterval: FiniteDuration = failureDetectionInterval): ClusterReceptionistSettings =
+ heartbeatInterval: FiniteDuration = heartbeatInterval,
+ acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause,
+ failureDetectionInterval: FiniteDuration = failureDetectionInterval): ClusterReceptionistSettings =
new ClusterReceptionistSettings(
role,
numberOfContacts,
@@ -787,7 +787,7 @@ object ClusterReceptionist {
*/
def props(
pubSubMediator: ActorRef,
- settings: ClusterReceptionistSettings): Props =
+ settings: ClusterReceptionistSettings): Props =
Props(new ClusterReceptionist(pubSubMediator, settings)).withDeploy(Deploy.local)
/**
@@ -858,7 +858,8 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep
val verboseHeartbeat = cluster.settings.Debug.VerboseHeartbeatLogging
import cluster.selfAddress
- require(role.forall(cluster.selfRoles.contains),
+ require(
+ role.forall(cluster.selfRoles.contains),
s"This cluster member [$selfAddress] doesn't have the role [$role]")
var nodes: immutable.SortedSet[Address] = {
@@ -999,7 +1000,7 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep
case None ⇒
val failureDetector = new DeadlineFailureDetector(acceptableHeartbeatPause, heartbeatInterval)
failureDetector.heartbeat()
- clientInteractions = clientInteractions + (client -> failureDetector)
+ clientInteractions = clientInteractions + (client → failureDetector)
log.debug("Received new contact from [{}]", client.path)
val clusterClientUp = ClusterClientUp(client)
subscribers.foreach(_ ! clusterClientUp)
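(A usage sketch for the settings class reformatted above, through the public `ClusterClient` entry point; host, port and system names are placeholders.)

    import akka.actor.{ ActorPath, ActorSystem }
    import akka.cluster.client.{ ClusterClient, ClusterClientSettings }

    val system = ActorSystem("ClientSystem")
    val initialContacts = Set(
      ActorPath.fromString("akka.tcp://OtherSys@host1:2552/system/receptionist"))

    val client = system.actorOf(
      ClusterClient.props(
        ClusterClientSettings(system).withInitialContacts(initialContacts)),
      name = "client")

    // localAffinity prefers a matching actor running in the receptionist's own system.
    client ! ClusterClient.Send("/user/serviceA", "hello", localAffinity = true)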
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala
index ce9e743f24..1a2f2bb5ab 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala
@@ -28,10 +28,10 @@ private[akka] class ClusterClientMessageSerializer(val system: ExtendedActorSyst
private val emptyByteArray = Array.empty[Byte]
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
- ContactsManifest -> contactsFromBinary,
- GetContactsManifest -> { _ ⇒ GetContacts },
- HeartbeatManifest -> { _ ⇒ Heartbeat },
- HeartbeatRspManifest -> { _ ⇒ HeartbeatRsp })
+ ContactsManifest → contactsFromBinary,
+ GetContactsManifest → { _ ⇒ GetContacts },
+ HeartbeatManifest → { _ ⇒ Heartbeat },
+ HeartbeatRspManifest → { _ ⇒ HeartbeatRsp })
override def manifest(obj: AnyRef): String = obj match {
case _: Contacts ⇒ ContactsManifest
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
index a56c5cef8d..9689a4206d 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
@@ -83,13 +83,14 @@ object DistributedPubSubSettings {
* the registries. Next chunk will be transferred in next round of gossip.
*/
final class DistributedPubSubSettings(
- val role: Option[String],
- val routingLogic: RoutingLogic,
- val gossipInterval: FiniteDuration,
+ val role: Option[String],
+ val routingLogic: RoutingLogic,
+ val gossipInterval: FiniteDuration,
val removedTimeToLive: FiniteDuration,
- val maxDeltaElements: Int) extends NoSerializationVerificationNeeded {
+ val maxDeltaElements: Int) extends NoSerializationVerificationNeeded {
- require(!routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
+ require(
+ !routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
"'ConsistentHashingRoutingLogic' can't be used by the pub-sub mediator")
def withRole(role: String): DistributedPubSubSettings = copy(role = DistributedPubSubSettings.roleOption(role))
@@ -108,11 +109,12 @@ final class DistributedPubSubSettings(
def withMaxDeltaElements(maxDeltaElements: Int): DistributedPubSubSettings =
copy(maxDeltaElements = maxDeltaElements)
- private def copy(role: Option[String] = role,
- routingLogic: RoutingLogic = routingLogic,
- gossipInterval: FiniteDuration = gossipInterval,
- removedTimeToLive: FiniteDuration = removedTimeToLive,
- maxDeltaElements: Int = maxDeltaElements): DistributedPubSubSettings =
+ private def copy(
+ role: Option[String] = role,
+ routingLogic: RoutingLogic = routingLogic,
+ gossipInterval: FiniteDuration = gossipInterval,
+ removedTimeToLive: FiniteDuration = removedTimeToLive,
+ maxDeltaElements: Int = maxDeltaElements): DistributedPubSubSettings =
new DistributedPubSubSettings(role, routingLogic, gossipInterval, removedTimeToLive, maxDeltaElements)
}
@@ -209,7 +211,7 @@ object DistributedPubSubMediator {
@SerialVersionUID(1L)
final case class Bucket(
- owner: Address,
+ owner: Address,
version: Long,
content: TreeMap[String, ValueHolder])
@@ -477,13 +479,15 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act
import DistributedPubSubMediator.Internal._
import settings._
- require(!routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
+ require(
+ !routingLogic.isInstanceOf[ConsistentHashingRoutingLogic],
"'consistent-hashing' routing logic can't be used by the pub-sub mediator")
val cluster = Cluster(context.system)
import cluster.selfAddress
- require(role.forall(cluster.selfRoles.contains),
+ require(
+ role.forall(cluster.selfRoles.contains),
s"This cluster member [${selfAddress}] doesn't have the role [$role]")
val removedTimeToLiveMillis = removedTimeToLive.toMillis
@@ -629,7 +633,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act
if (nodes(b.owner)) {
val myBucket = registry(b.owner)
if (b.version > myBucket.version) {
- registry += (b.owner -> myBucket.copy(version = b.version, content = myBucket.content ++ b.content))
+ registry += (b.owner → myBucket.copy(version = b.version, content = myBucket.content ++ b.content))
}
}
}
@@ -719,8 +723,9 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act
def put(key: String, valueOption: Option[ActorRef]): Unit = {
val bucket = registry(selfAddress)
val v = nextVersion()
- registry += (selfAddress -> bucket.copy(version = v,
- content = bucket.content + (key -> ValueHolder(v, valueOption))))
+ registry += (selfAddress → bucket.copy(
+ version = v,
+ content = bucket.content + (key → ValueHolder(v, valueOption))))
}
def getCurrentTopics(): Set[String] = {
@@ -743,11 +748,11 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act
def mkKey(path: ActorPath): String = Internal.mkKey(path)
- def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) ⇒ (owner -> bucket.version) }
+ def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) ⇒ (owner → bucket.version) }
def collectDelta(otherVersions: Map[Address, Long]): immutable.Iterable[Bucket] = {
// missing entries are represented by version 0
- val filledOtherVersions = myVersions.map { case (k, _) ⇒ k -> 0L } ++ otherVersions
+ val filledOtherVersions = myVersions.map { case (k, _) ⇒ k → 0L } ++ otherVersions
var count = 0
filledOtherVersions.collect {
case (owner, v) if registry(owner).version > v && count < maxDeltaElements ⇒
@@ -791,7 +796,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act
case (key, ValueHolder(version, None)) if (bucket.version - version > removedTimeToLiveMillis) ⇒ key
}
if (oldRemoved.nonEmpty)
- registry += owner -> bucket.copy(content = bucket.content -- oldRemoved)
+ registry += owner → bucket.copy(content = bucket.content -- oldRemoved)
}
}
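(The bucket/registry machinery reformatted above backs the public mediator API. A minimal subscriber sketch; the topic name is illustrative.)

    import akka.actor.Actor
    import akka.cluster.pubsub.DistributedPubSub
    import akka.cluster.pubsub.DistributedPubSubMediator.{ Publish, Subscribe, SubscribeAck }

    class Subscriber extends Actor {
      private val mediator = DistributedPubSub(context.system).mediator
      mediator ! Subscribe("content", self)

      def receive = {
        case SubscribeAck(Subscribe("content", None, `self`)) ⇒
          context.system.log.info("subscribed to [content]")
        case s: String ⇒
          context.system.log.info("got [{}]", s)
      }
    }

    // Any actor with a reference to the mediator can publish to all subscribers:
    //   mediator ! Publish("content", "hello")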
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala
index 43efd6caf0..41237ea674 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala
@@ -44,5 +44,5 @@ private[pubsub] trait PerGroupingBuffer {
}
}
- def initializeGrouping(grouping: String): Unit = buffers += grouping -> Vector.empty
+ def initializeGrouping(grouping: String): Unit = buffers += grouping → Vector.empty
}
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala
index 9bfd99a166..6060746193 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala
@@ -39,11 +39,11 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor
private val PublishManifest = "E"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
- StatusManifest -> statusFromBinary,
- DeltaManifest -> deltaFromBinary,
- SendManifest -> sendFromBinary,
- SendToAllManifest -> sendToAllFromBinary,
- PublishManifest -> publishFromBinary)
+ StatusManifest → statusFromBinary,
+ DeltaManifest → deltaFromBinary,
+ SendManifest → sendFromBinary,
+ SendToAllManifest → sendToAllFromBinary,
+ PublishManifest → publishFromBinary)
override def manifest(obj: AnyRef): String = obj match {
case _: Status ⇒ StatusManifest
@@ -122,7 +122,7 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor
private def statusFromProto(status: dm.Status): Status =
Status(status.getVersionsList.asScala.map(v ⇒
- addressFromProto(v.getAddress) -> v.getTimestamp)(breakOut))
+ addressFromProto(v.getAddress) → v.getTimestamp)(breakOut))
private def deltaToProto(delta: Delta): dm.Delta = {
val buckets = delta.buckets.map { b ⇒
@@ -148,7 +148,7 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor
private def deltaFromProto(delta: dm.Delta): Delta =
Delta(delta.getBucketsList.asScala.toVector.map { b ⇒
val content: TreeMap[String, ValueHolder] = b.getContentList.asScala.map { entry ⇒
- entry.getKey -> ValueHolder(entry.getVersion, if (entry.hasRef) Some(resolveActorRef(entry.getRef)) else None)
+ entry.getKey → ValueHolder(entry.getVersion, if (entry.hasRef) Some(resolveActorRef(entry.getRef)) else None)
}(breakOut)
Bucket(addressFromProto(b.getOwner), b.getVersion, content)
})
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
index 975c79083d..0383f294c3 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
@@ -86,9 +86,9 @@ object ClusterSingletonManagerSettings {
* (+ `removalMargin`).
*/
final class ClusterSingletonManagerSettings(
- val singletonName: String,
- val role: Option[String],
- val removalMargin: FiniteDuration,
+ val singletonName: String,
+ val role: Option[String],
+ val removalMargin: FiniteDuration,
val handOverRetryInterval: FiniteDuration) extends NoSerializationVerificationNeeded {
def withSingletonName(name: String): ClusterSingletonManagerSettings = copy(singletonName = name)
@@ -103,10 +103,11 @@ final class ClusterSingletonManagerSettings(
def withHandOverRetryInterval(retryInterval: FiniteDuration): ClusterSingletonManagerSettings =
copy(handOverRetryInterval = retryInterval)
- private def copy(singletonName: String = singletonName,
- role: Option[String] = role,
- removalMargin: FiniteDuration = removalMargin,
- handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings =
+ private def copy(
+ singletonName: String = singletonName,
+ role: Option[String] = role,
+ removalMargin: FiniteDuration = removalMargin,
+ handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings =
new ClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval)
}
@@ -121,9 +122,9 @@ object ClusterSingletonManager {
* Scala API: Factory method for `ClusterSingletonManager` [[akka.actor.Props]].
*/
def props(
- singletonProps: Props,
+ singletonProps: Props,
terminationMessage: Any,
- settings: ClusterSingletonManagerSettings): Props =
+ settings: ClusterSingletonManagerSettings): Props =
Props(new ClusterSingletonManager(singletonProps, terminationMessage, settings)).withDeploy(Deploy.local)
/**
@@ -364,20 +365,22 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess
* @param settings see [[ClusterSingletonManagerSettings]]
*/
class ClusterSingletonManager(
- singletonProps: Props,
+ singletonProps: Props,
terminationMessage: Any,
- settings: ClusterSingletonManagerSettings)
+ settings: ClusterSingletonManagerSettings)
extends Actor with FSM[ClusterSingletonManager.State, ClusterSingletonManager.Data] {
import ClusterSingletonManager.Internal._
import ClusterSingletonManager.Internal.OldestChangedBuffer._
import settings._
+ import FSM.`→`
val cluster = Cluster(context.system)
val selfAddressOption = Some(cluster.selfAddress)
import cluster.settings.LogInfo
- require(role.forall(cluster.selfRoles.contains),
+ require(
+ role.forall(cluster.selfRoles.contains),
s"This cluster member [${cluster.selfAddress}] doesn't have the role [$role]")
val removalMargin =
@@ -406,7 +409,7 @@ class ClusterSingletonManager(
var removed = Map.empty[Address, Deadline]
def addRemoved(address: Address): Unit =
- removed += address -> (Deadline.now + 15.minutes)
+ removed += address → (Deadline.now + 15.minutes)
def cleanupOverdueNotMemberAnyMore(): Unit = {
removed = removed filter { case (address, deadline) ⇒ deadline.hasTimeLeft }
@@ -514,7 +517,8 @@ class ClusterSingletonManager(
if (sender().path.address == previousOldest)
gotoOldest()
else {
- logInfo("Ignoring HandOverDone in BecomingOldest from [{}]. Expected previous oldest [{}]",
+ logInfo(
+ "Ignoring HandOverDone in BecomingOldest from [{}]. Expected previous oldest [{}]",
sender().path.address, previousOldest)
stay
}
@@ -538,7 +542,8 @@ class ClusterSingletonManager(
case Event(TakeOverFromMe, BecomingOldestData(Some(previousOldest))) ⇒
if (previousOldest == sender().path.address) sender() ! HandOverToMe
- else logInfo("Ignoring TakeOver request in BecomingOldest from [{}]. Expected previous oldest [{}]",
+ else logInfo(
+ "Ignoring TakeOver request in BecomingOldest from [{}]. Expected previous oldest [{}]",
sender().path.address, previousOldest)
stay
@@ -698,24 +703,24 @@ class ClusterSingletonManager(
}
onTransition {
- case from -> to ⇒ logInfo("ClusterSingletonManager state change [{} -> {}]", from, to)
+ case from → to ⇒ logInfo("ClusterSingletonManager state change [{} -> {}]", from, to)
}
onTransition {
- case _ -> BecomingOldest ⇒ setTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval, repeat = false)
+ case _ → BecomingOldest ⇒ setTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval, repeat = false)
}
onTransition {
- case BecomingOldest -> _ ⇒ cancelTimer(HandOverRetryTimer)
- case WasOldest -> _ ⇒ cancelTimer(TakeOverRetryTimer)
+ case BecomingOldest → _ ⇒ cancelTimer(HandOverRetryTimer)
+ case WasOldest → _ ⇒ cancelTimer(TakeOverRetryTimer)
}
onTransition {
- case _ -> (Younger | Oldest) ⇒ getNextOldestChanged()
+ case _ → (Younger | Oldest) ⇒ getNextOldestChanged()
}
onTransition {
- case _ -> (Younger | End) if removed.contains(cluster.selfAddress) ⇒
+ case _ → (Younger | End) if removed.contains(cluster.selfAddress) ⇒
logInfo("Self removed, stopping ClusterSingletonManager")
// note that FSM.stop() can't be used in onTransition
context.stop(self)
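(The `import FSM.`→`` added above brings the FSM transition extractor into scope under its unicode alias, so the rewritten `case from → to` patterns in `onTransition` still destructure the `(from, to)` state pair. The same mechanism in isolation, with illustrative states.)

    import akka.actor.{ Actor, FSM }
    import FSM.`→`

    sealed trait ToggleState
    case object Idle extends ToggleState
    case object Active extends ToggleState

    class Toggle extends Actor with FSM[ToggleState, Unit] {
      startWith(Idle, ())

      when(Idle) { case Event("go", _) ⇒ goto(Active) }
      when(Active) { case Event("stop", _) ⇒ goto(Idle) }

      onTransition {
        // `→` unapplies the (from, to) pair exactly like `->`.
        case Idle → Active ⇒ log.info("activated")
        case Active → Idle ⇒ log.info("deactivated")
      }

      initialize()
    }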
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
index 8cd8599ebd..73a10983a0 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
@@ -69,10 +69,10 @@ object ClusterSingletonProxySettings {
* immediately if the location of the singleton is unknown.
*/
final class ClusterSingletonProxySettings(
- val singletonName: String,
- val role: Option[String],
+ val singletonName: String,
+ val role: Option[String],
val singletonIdentificationInterval: FiniteDuration,
- val bufferSize: Int) extends NoSerializationVerificationNeeded {
+ val bufferSize: Int) extends NoSerializationVerificationNeeded {
require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000")
@@ -88,10 +88,11 @@ final class ClusterSingletonProxySettings(
def withBufferSize(bufferSize: Int): ClusterSingletonProxySettings =
copy(bufferSize = bufferSize)
- private def copy(singletonName: String = singletonName,
- role: Option[String] = role,
- singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval,
- bufferSize: Int = bufferSize): ClusterSingletonProxySettings =
+ private def copy(
+ singletonName: String = singletonName,
+ role: Option[String] = role,
+ singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval,
+ bufferSize: Int = bufferSize): ClusterSingletonProxySettings =
new ClusterSingletonProxySettings(singletonName, role, singletonIdentificationInterval, bufferSize)
}
@@ -252,7 +253,8 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste
singleton match {
case Some(s) ⇒
if (log.isDebugEnabled)
- log.debug("Forwarding message of type [{}] to current singleton instance at [{}]: {}",
+ log.debug(
+ "Forwarding message of type [{}] to current singleton instance at [{}]: {}",
Logging.simpleName(msg.getClass), s.path, msg)
s forward msg
case None ⇒
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala
index 7a9ae66cb7..00e1233b75 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala
@@ -30,10 +30,10 @@ private[akka] class ClusterSingletonMessageSerializer(val system: ExtendedActorS
private val emptyByteArray = Array.empty[Byte]
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
- HandOverToMeManifest -> { _ ⇒ HandOverToMe },
- HandOverInProgressManifest -> { _ ⇒ HandOverInProgress },
- HandOverDoneManifest -> { _ ⇒ HandOverDone },
- TakeOverFromMeManifest -> { _ ⇒ TakeOverFromMe })
+ HandOverToMeManifest → { _ ⇒ HandOverToMe },
+ HandOverInProgressManifest → { _ ⇒ HandOverInProgress },
+ HandOverDoneManifest → { _ ⇒ HandOverDone },
+ TakeOverFromMeManifest → { _ ⇒ TakeOverFromMe })
override def manifest(obj: AnyRef): String = obj match {
case HandOverToMe ⇒ HandOverToMeManifest
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
index 89c7554ead..f55cf493ee 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
@@ -430,7 +430,8 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
runOn(remainingServerRoleNames.toSeq: _*) {
Await.ready(system.whenTerminated, 20.seconds)
// start new system on same port
- val sys2 = ActorSystem(system.name,
+ val sys2 = ActorSystem(
+ system.name,
ConfigFactory.parseString("akka.remote.netty.tcp.port=" + Cluster(system).selfAddress.port.get)
.withFallback(system.settings.config))
Cluster(sys2).join(Cluster(sys2).selfAddress)
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala
index 3feca95d2f..09a761863b 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala
@@ -146,7 +146,7 @@ class DistributedPubSubMediatorSpec extends MultiNodeSpec(DistributedPubSubMedia
def createChatUser(name: String): ActorRef = {
var a = system.actorOf(Props(classOf[TestChatUser], mediator, testActor), name)
- chatUsers += (name -> a)
+ chatUsers += (name → a)
a
}
@@ -473,11 +473,11 @@ class DistributedPubSubMediatorSpec extends MultiNodeSpec(DistributedPubSubMedia
val deltaBuckets1 = expectMsgType[Delta].buckets
deltaBuckets1.map(_.content.size).sum should ===(500)
- mediator ! Status(versions = deltaBuckets1.map(b ⇒ b.owner -> b.version).toMap)
+ mediator ! Status(versions = deltaBuckets1.map(b ⇒ b.owner → b.version).toMap)
val deltaBuckets2 = expectMsgType[Delta].buckets
deltaBuckets2.map(_.content.size).sum should ===(500)
- mediator ! Status(versions = deltaBuckets2.map(b ⇒ b.owner -> b.version).toMap)
+ mediator ! Status(versions = deltaBuckets2.map(b ⇒ b.owner → b.version).toMap)
val deltaBuckets3 = expectMsgType[Delta].buckets
deltaBuckets3.map(_.content.size).sum should ===(10 + 9 + 2 + many - 500 - 500)
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala
index ec6889c482..c3fd74b90a 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala
@@ -77,10 +77,11 @@ class ClusterSingletonManagerChaosSpec extends MultiNodeSpec(ClusterSingletonMan
}
def createSingleton(): ActorRef = {
- system.actorOf(ClusterSingletonManager.props(
- singletonProps = Props(classOf[Echo], testActor),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ system.actorOf(
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Echo], testActor),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system)),
name = "echo")
}
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
index 7ed25e9308..71652eee0f 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
@@ -75,17 +75,19 @@ class ClusterSingletonManagerLeaveSpec extends MultiNodeSpec(ClusterSingletonMan
}
def createSingleton(): ActorRef = {
- system.actorOf(ClusterSingletonManager.props(
- singletonProps = Props(classOf[Echo], testActor),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ system.actorOf(
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Echo], testActor),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system)),
name = "echo")
}
lazy val echoProxy: ActorRef = {
- system.actorOf(ClusterSingletonProxy.props(
- singletonManagerPath = "/user/echo",
- settings = ClusterSingletonProxySettings(system)),
+ system.actorOf(
+ ClusterSingletonProxy.props(
+ singletonManagerPath = "/user/echo",
+ settings = ClusterSingletonProxySettings(system)),
name = "echoProxy")
}
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
index 4085b6b96b..fe5ceab877 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
@@ -211,19 +211,21 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS
def createSingleton(): ActorRef = {
//#create-singleton-manager
- system.actorOf(ClusterSingletonManager.props(
- singletonProps = Props(classOf[Consumer], queue, testActor),
- terminationMessage = End,
- settings = ClusterSingletonManagerSettings(system).withRole("worker")),
+ system.actorOf(
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Consumer], queue, testActor),
+ terminationMessage = End,
+ settings = ClusterSingletonManagerSettings(system).withRole("worker")),
name = "consumer")
//#create-singleton-manager
}
def createSingletonProxy(): ActorRef = {
//#create-singleton-proxy
- system.actorOf(ClusterSingletonProxy.props(
- singletonManagerPath = "/user/consumer",
- settings = ClusterSingletonProxySettings(system).withRole("worker")),
+ system.actorOf(
+ ClusterSingletonProxy.props(
+ singletonManagerPath = "/user/consumer",
+ settings = ClusterSingletonProxySettings(system).withRole("worker")),
name = "consumerProxy")
//#create-singleton-proxy
}
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala
index 4c9baf948f..0b0b42b7a8 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala
@@ -69,17 +69,19 @@ class ClusterSingletonManagerStartupSpec extends MultiNodeSpec(ClusterSingletonM
}
def createSingleton(): ActorRef = {
- system.actorOf(ClusterSingletonManager.props(
- singletonProps = Props(classOf[Echo], testActor),
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system)),
+ system.actorOf(
+ ClusterSingletonManager.props(
+ singletonProps = Props(classOf[Echo], testActor),
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system)),
name = "echo")
}
lazy val echoProxy: ActorRef = {
- system.actorOf(ClusterSingletonProxy.props(
- singletonManagerPath = "/user/echo",
- settings = ClusterSingletonProxySettings(system)),
+ system.actorOf(
+ ClusterSingletonProxy.props(
+ singletonManagerPath = "/user/echo",
+ settings = ClusterSingletonProxySettings(system)),
name = "echoProxy")
}
diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala
index f05f5cabf6..ad21d3779c 100644
--- a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala
+++ b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala
@@ -30,11 +30,11 @@ class DistributedPubSubMessageSerializerSpec extends AkkaSpec {
val u2 = system.actorOf(Props.empty, "u2")
val u3 = system.actorOf(Props.empty, "u3")
val u4 = system.actorOf(Props.empty, "u4")
- checkSerialization(Status(Map(address1 -> 3, address2 -> 17, address3 -> 5)))
+ checkSerialization(Status(Map(address1 → 3, address2 → 17, address3 → 5)))
checkSerialization(Delta(List(
- Bucket(address1, 3, TreeMap("/user/u1" -> ValueHolder(2, Some(u1)), "/user/u2" -> ValueHolder(3, Some(u2)))),
- Bucket(address2, 17, TreeMap("/user/u3" -> ValueHolder(17, Some(u3)))),
- Bucket(address3, 5, TreeMap("/user/u4" -> ValueHolder(4, Some(u4)), "/user/u5" -> ValueHolder(5, None))))))
+ Bucket(address1, 3, TreeMap("/user/u1" → ValueHolder(2, Some(u1)), "/user/u2" → ValueHolder(3, Some(u2)))),
+ Bucket(address2, 17, TreeMap("/user/u3" → ValueHolder(17, Some(u3)))),
+ Bucket(address3, 5, TreeMap("/user/u4" → ValueHolder(4, Some(u4)), "/user/u5" → ValueHolder(5, None))))))
checkSerialization(Send("/user/u3", "hello", localAffinity = true))
checkSerialization(SendToAll("/user/u3", "hello", allButSelf = true))
checkSerialization(Publish("mytopic", "hello"))
diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala
index 0f75307ba0..005f7b608d 100644
--- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala
+++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala
@@ -38,14 +38,16 @@ object ClusterSingletonProxySpec {
joinTo.foreach(address ⇒ cluster.join(address))
cluster.registerOnMemberUp {
- system.actorOf(ClusterSingletonManager.props(
- singletonProps = Props[Singleton],
- terminationMessage = PoisonPill,
- settings = ClusterSingletonManagerSettings(system).withRemovalMargin(5.seconds)),
+ system.actorOf(
+ ClusterSingletonManager.props(
+ singletonProps = Props[Singleton],
+ terminationMessage = PoisonPill,
+ settings = ClusterSingletonManagerSettings(system).withRemovalMargin(5.seconds)),
name = "singletonManager")
}
- val proxy = system.actorOf(ClusterSingletonProxy.props("user/singletonManager",
+ val proxy = system.actorOf(ClusterSingletonProxy.props(
+ "user/singletonManager",
settings = ClusterSingletonProxySettings(system)), s"singletonProxy-${cluster.selfAddress.port.getOrElse(0)}")
def testProxy(msg: String) {
diff --git a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala
index b06b4bfc14..6274ae8d3a 100644
--- a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala
@@ -142,7 +142,7 @@ private[cluster] abstract class AutoDownBase(autoDownUnreachableAfter: FiniteDur
downOrAddPending(node)
} else {
val task = scheduler.scheduleOnce(autoDownUnreachableAfter, self, UnreachableTimeout(node))
- scheduledUnreachable += (node -> task)
+ scheduledUnreachable += (node → task)
}
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
index 013ce73485..d8ec2cd930 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala
@@ -115,8 +115,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
*/
private[cluster] val scheduler: Scheduler = {
if (system.scheduler.maxFrequency < 1.second / SchedulerTickDuration) {
- logInfo("Using a dedicated scheduler for cluster. Default scheduler can be used if configured " +
- "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].",
+ logInfo(
+ "Using a dedicated scheduler for cluster. Default scheduler can be used if configured " +
+ "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].",
(1000 / system.scheduler.maxFrequency).toInt, SchedulerTickDuration.toMillis)
val cfg = ConfigFactory.parseString(
@@ -127,9 +128,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
case tf ⇒ tf
}
system.dynamicAccess.createInstanceFor[Scheduler](system.settings.SchedulerClass, immutable.Seq(
- classOf[Config] -> cfg,
- classOf[LoggingAdapter] -> log,
- classOf[ThreadFactory] -> threadFactory)).get
+ classOf[Config] → cfg,
+ classOf[LoggingAdapter] → log,
+ classOf[ThreadFactory] → threadFactory)).get
} else {
// delegate to system.scheduler, but don't close over system
val systemScheduler = system.scheduler
@@ -142,8 +143,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable =
systemScheduler.schedule(initialDelay, interval, runnable)
- override def scheduleOnce(delay: FiniteDuration,
- runnable: Runnable)(implicit executor: ExecutionContext): Cancellable =
+ override def scheduleOnce(
+ delay: FiniteDuration,
+ runnable: Runnable)(implicit executor: ExecutionContext): Cancellable =
systemScheduler.scheduleOnce(delay, runnable)
}
}
@@ -228,7 +230,8 @@ class Cluster(val system: ExtendedActorSystem) extends Extension {
*/
@varargs def subscribe(subscriber: ActorRef, initialStateMode: SubscriptionInitialStateMode, to: Class[_]*): Unit = {
require(to.length > 0, "at least one `ClusterDomainEvent` class is required")
- require(to.forall(classOf[ClusterDomainEvent].isAssignableFrom),
+ require(
+ to.forall(classOf[ClusterDomainEvent].isAssignableFrom),
s"subscribe to `akka.cluster.ClusterEvent.ClusterDomainEvent` or subclasses, was [${to.map(_.getName).mkString(", ")}]")
clusterCore ! InternalClusterAction.Subscribe(subscriber, initialStateMode, to.toSet)
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala
index b621cfcc46..382edd62bb 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala
@@ -21,9 +21,9 @@ import com.typesafe.config.ConfigFactory
* the `ClusterActorRefProvider` is used.
*/
private[akka] class ClusterActorRefProvider(
- _systemName: String,
- _settings: ActorSystem.Settings,
- _eventStream: EventStream,
+ _systemName: String,
+ _settings: ActorSystem.Settings,
+ _eventStream: EventStream,
_dynamicAccess: DynamicAccess) extends RemoteActorRefProvider(
_systemName, _settings, _eventStream, _dynamicAccess) {
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala
index f9320ba74f..d8c613114a 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala
@@ -274,15 +274,18 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with
import context.dispatcher
// start periodic gossip to random nodes in cluster
- val gossipTask = scheduler.schedule(PeriodicTasksInitialDelay.max(GossipInterval),
+ val gossipTask = scheduler.schedule(
+ PeriodicTasksInitialDelay.max(GossipInterval),
GossipInterval, self, GossipTick)
// start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list)
- val failureDetectorReaperTask = scheduler.schedule(PeriodicTasksInitialDelay.max(UnreachableNodesReaperInterval),
+ val failureDetectorReaperTask = scheduler.schedule(
+ PeriodicTasksInitialDelay.max(UnreachableNodesReaperInterval),
UnreachableNodesReaperInterval, self, ReapUnreachableTick)
// start periodic leader action management (only applies for the current leader)
- val leaderActionsTask = scheduler.schedule(PeriodicTasksInitialDelay.max(LeaderActionsInterval),
+ val leaderActionsTask = scheduler.schedule(
+ PeriodicTasksInitialDelay.max(LeaderActionsInterval),
LeaderActionsInterval, self, LeaderActionsTick)
// start periodic publish of current stats
@@ -377,7 +380,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with
case ClusterUserAction.JoinTo(address) ⇒
logInfo("Trying to join [{}] when already part of a cluster, ignoring", address)
case JoinSeedNodes(seedNodes) ⇒
- logInfo("Trying to join seed nodes [{}] when already part of a cluster, ignoring",
+ logInfo(
+ "Trying to join seed nodes [{}] when already part of a cluster, ignoring",
seedNodes.mkString(", "))
}
@@ -433,10 +437,12 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with
*/
def join(address: Address): Unit = {
if (address.protocol != selfAddress.protocol)
- log.warning("Trying to join member with wrong protocol, but was ignored, expected [{}] but was [{}]",
+ log.warning(
+ "Trying to join member with wrong protocol, but was ignored, expected [{}] but was [{}]",
selfAddress.protocol, address.protocol)
else if (address.system != selfAddress.system)
- log.warning("Trying to join member with wrong ActorSystem name, but was ignored, expected [{}] but was [{}]",
+ log.warning(
+ "Trying to join member with wrong ActorSystem name, but was ignored, expected [{}] but was [{}]",
selfAddress.system, address.system)
else {
require(latestGossip.members.isEmpty, "Join can only be done from empty state")
@@ -476,10 +482,12 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with
def joining(node: UniqueAddress, roles: Set[String]): Unit = {
val selfStatus = latestGossip.member(selfUniqueAddress).status
if (node.address.protocol != selfAddress.protocol)
- log.warning("Member with wrong protocol tried to join, but was ignored, expected [{}] but was [{}]",
+ log.warning(
+ "Member with wrong protocol tried to join, but was ignored, expected [{}] but was [{}]",
selfAddress.protocol, node.address.protocol)
else if (node.address.system != selfAddress.system)
- log.warning("Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]",
+ log.warning(
+ "Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]",
selfAddress.system, node.address.system)
else if (Gossip.removeUnreachableWithMemberStatus.contains(selfStatus))
logInfo("Trying to join [{}] to [{}] member, ignoring. Use a member that is Up instead.", node, selfStatus)
@@ -613,7 +621,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with
val newOverview = localGossip.overview copy (reachability = newReachability)
val newGossip = localGossip copy (overview = newOverview)
updateLatestGossip(newGossip)
- log.warning("Cluster Node [{}] - Marking node as TERMINATED [{}], due to quarantine. Node roles [{}]",
+ log.warning(
+ "Cluster Node [{}] - Marking node as TERMINATED [{}], due to quarantine. Node roles [{}]",
selfAddress, node.address, selfRoles.mkString(","))
publish(latestGossip)
downing(node.address)
@@ -840,7 +849,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with
leaderActionCounter += 1
if (leaderActionCounter == firstNotice || leaderActionCounter % periodicNotice == 0)
- logInfo("Leader can currently not perform its duties, reachability status: [{}], member status: [{}]",
+ logInfo(
+ "Leader can currently not perform its duties, reachability status: [{}], member status: [{}]",
latestGossip.reachabilityExcludingDownedObservers,
latestGossip.members.map(m ⇒
s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}").mkString(", "))
@@ -1047,7 +1057,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef) extends Actor with
if (nonExiting.nonEmpty)
log.warning("Cluster Node [{}] - Marking node(s) as UNREACHABLE [{}]. Node roles [{}]", selfAddress, nonExiting.mkString(", "), selfRoles.mkString(", "))
if (exiting.nonEmpty)
- logInfo("Marking exiting node(s) as UNREACHABLE [{}]. This is expected and they will be removed.",
+ logInfo(
+ "Marking exiting node(s) as UNREACHABLE [{}]. This is expected and they will be removed.",
exiting.mkString(", "))
if (newlyDetectedReachableMembers.nonEmpty)
logInfo("Marking node(s) as REACHABLE [{}]. Node roles [{}]", newlyDetectedReachableMembers.mkString(", "), selfRoles.mkString(","))
@@ -1294,10 +1305,10 @@ private[cluster] class OnMemberStatusChangedListener(callback: Runnable, status:
@SerialVersionUID(1L)
private[cluster] final case class GossipStats(
receivedGossipCount: Long = 0L,
- mergeCount: Long = 0L,
- sameCount: Long = 0L,
- newerCount: Long = 0L,
- olderCount: Long = 0L) {
+ mergeCount: Long = 0L,
+ sameCount: Long = 0L,
+ newerCount: Long = 0L,
+ olderCount: Long = 0L) {
def incrementMergeCount(): GossipStats =
copy(mergeCount = mergeCount + 1, receivedGossipCount = receivedGossipCount + 1)
@@ -1337,5 +1348,5 @@ private[cluster] final case class GossipStats(
@SerialVersionUID(1L)
private[cluster] final case class VectorClockStats(
versionSize: Int = 0,
- seenLatest: Int = 0)
+ seenLatest: Int = 0)
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
index ac93008d31..72852ad5fe 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
@@ -56,10 +56,10 @@ object ClusterEvent {
* Current snapshot state of the cluster. Sent to new subscriber.
*/
final case class CurrentClusterState(
- members: immutable.SortedSet[Member] = immutable.SortedSet.empty,
- unreachable: Set[Member] = Set.empty,
- seenBy: Set[Address] = Set.empty,
- leader: Option[Address] = None,
+ members: immutable.SortedSet[Member] = immutable.SortedSet.empty,
+ unreachable: Set[Member] = Set.empty,
+ seenBy: Set[Address] = Set.empty,
+ leader: Option[Address] = None,
roleLeaderMap: Map[String, Option[Address]] = Map.empty) {
/**
@@ -395,7 +395,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto
unreachable = unreachable,
seenBy = latestGossip.seenBy.map(_.address),
leader = latestGossip.leader(selfUniqueAddress).map(_.address),
- roleLeaderMap = latestGossip.allRoles.map(r ⇒ r -> latestGossip.roleLeader(r, selfUniqueAddress)
+ roleLeaderMap = latestGossip.allRoles.map(r ⇒ r → latestGossip.roleLeader(r, selfUniqueAddress)
.map(_.address))(collection.breakOut))
receiver ! state
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala
index f192c9314b..6279e44928 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala
@@ -76,7 +76,8 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg
failureDetector)
// start periodic heartbeat to other nodes in cluster
- val heartbeatTask = scheduler.schedule(PeriodicTasksInitialDelay max HeartbeatInterval,
+ val heartbeatTask = scheduler.schedule(
+ PeriodicTasksInitialDelay max HeartbeatInterval,
HeartbeatInterval, self, HeartbeatTick)
override def preStart(): Unit = {
@@ -173,9 +174,9 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg
* It is immutable, but it updates the failureDetector.
*/
private[cluster] final case class ClusterHeartbeatSenderState(
- ring: HeartbeatNodeRing,
+ ring: HeartbeatNodeRing,
oldReceiversNowUnreachable: Set[UniqueAddress],
- failureDetector: FailureDetectorRegistry[Address]) {
+ failureDetector: FailureDetectorRegistry[Address]) {
val activeReceivers: Set[UniqueAddress] = ring.myReceivers union oldReceiversNowUnreachable
@@ -241,9 +242,9 @@ private[cluster] final case class ClusterHeartbeatSenderState(
* It is immutable, i.e. the methods return new instances.
*/
private[cluster] final case class HeartbeatNodeRing(
- selfAddress: UniqueAddress,
- nodes: Set[UniqueAddress],
- unreachable: Set[UniqueAddress],
+ selfAddress: UniqueAddress,
+ nodes: Set[UniqueAddress],
+ unreachable: Set[UniqueAddress],
monitoredByNrOfMembers: Int) {
require(nodes contains selfAddress, s"nodes [${nodes.mkString(", ")}] must contain selfAddress [${selfAddress}]")
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala
index 941fdb8ba8..cc8cb30bf5 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala
@@ -69,13 +69,15 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto
/**
* Start periodic gossip to random nodes in cluster
*/
- val gossipTask = scheduler.schedule(PeriodicTasksInitialDelay max MetricsGossipInterval,
+ val gossipTask = scheduler.schedule(
+ PeriodicTasksInitialDelay max MetricsGossipInterval,
MetricsGossipInterval, self, GossipTick)
/**
* Start periodic metrics collection
*/
- val metricsTask = scheduler.schedule(PeriodicTasksInitialDelay max MetricsInterval,
+ val metricsTask = scheduler.schedule(
+ PeriodicTasksInitialDelay max MetricsInterval,
MetricsInterval, self, MetricsTick)
override def preStart(): Unit = {
@@ -535,11 +537,11 @@ object StandardMetrics {
*/
@SerialVersionUID(1L)
final case class Cpu(
- address: Address,
- timestamp: Long,
+ address: Address,
+ timestamp: Long,
systemLoadAverage: Option[Double],
- cpuCombined: Option[Double],
- processors: Int) {
+ cpuCombined: Option[Double],
+ processors: Int) {
cpuCombined match {
case Some(x) ⇒ require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]")
@@ -607,7 +609,8 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics
import StandardMetrics._
private def this(cluster: Cluster) =
- this(cluster.selfAddress,
+ this(
+ cluster.selfAddress,
EWMA.alpha(cluster.settings.MetricsMovingAverageHalfLife, cluster.settings.MetricsInterval))
/**
@@ -710,7 +713,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: AnyRef
import StandardMetrics._
private def this(cluster: Cluster) =
- this(cluster.selfAddress,
+ this(
+ cluster.selfAddress,
EWMA.alpha(cluster.settings.MetricsMovingAverageHalfLife, cluster.settings.MetricsInterval),
cluster.system.dynamicAccess.createInstanceFor[AnyRef]("org.hyperic.sigar.Sigar", Nil).get)
@@ -804,7 +808,7 @@ private[cluster] object MetricsCollector {
}
} else {
- system.dynamicAccess.createInstanceFor[MetricsCollector](fqcn, List(classOf[ActorSystem] -> system)).
+ system.dynamicAccess.createInstanceFor[MetricsCollector](fqcn, List(classOf[ActorSystem] → system)).
recover {
case e ⇒ throw new ConfigurationException("Could not create custom metrics collector [" + fqcn + "] due to:" + e.toString)
}.get
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala
index 0dc643a4d9..5db167ed24 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala
@@ -69,12 +69,13 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable {
val newUnreachable =
if (_state.unreachable.contains(event.member)) _state.unreachable - event.member + event.member
else _state.unreachable
- _state = _state.copy(members = _state.members - event.member + event.member,
+ _state = _state.copy(
+ members = _state.members - event.member + event.member,
unreachable = newUnreachable)
case LeaderChanged(leader) ⇒
_state = _state.copy(leader = leader)
case RoleLeaderChanged(role, leader) ⇒
- _state = _state.copy(roleLeaderMap = _state.roleLeaderMap + (role -> leader))
+ _state = _state.copy(roleLeaderMap = _state.roleLeaderMap + (role → leader))
case stats: CurrentInternalStats ⇒ _latestStats = stats
case ClusterMetricsChanged(nodes) ⇒ _clusterMetrics = nodes
case ClusterShuttingDown ⇒
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala
index 00d64a5eca..8fb729930a 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala
@@ -21,9 +21,9 @@ private[cluster] object ClusterRemoteWatcher {
* Factory method for `ClusterRemoteWatcher` [[akka.actor.Props]].
*/
def props(
- failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
heartbeatExpectedResponseAfter: FiniteDuration): Props =
Props(classOf[ClusterRemoteWatcher], failureDetector, heartbeatInterval, unreachableReaperInterval,
heartbeatExpectedResponseAfter).withDeploy(Deploy.local)
@@ -41,9 +41,9 @@ private[cluster] object ClusterRemoteWatcher {
* of the cluster and then later becomes cluster member.
*/
private[cluster] class ClusterRemoteWatcher(
- failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
heartbeatExpectedResponseAfter: FiniteDuration)
extends RemoteWatcher(
failureDetector,
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala
index 0cb6686173..682a7cb849 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala
@@ -97,7 +97,7 @@ final class ClusterSettings(val config: Config, val systemName: String) {
val MinNrOfMembersOfRole: Map[String, Int] = {
import scala.collection.JavaConverters._
cc.getConfig("role").root.asScala.collect {
- case (key, value: ConfigObject) ⇒ key -> value.toConfig.getInt("min-nr-of-members")
+ case (key, value: ConfigObject) ⇒ key → value.toConfig.getInt("min-nr-of-members")
}.toMap
}
val JmxEnabled: Boolean = cc.getBoolean("jmx.enabled")
diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala
index 30df8e6e2d..404cdff676 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala
@@ -60,9 +60,9 @@ private[cluster] object Gossip {
*/
@SerialVersionUID(1L)
private[cluster] final case class Gossip(
- members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address
- overview: GossipOverview = GossipOverview(),
- version: VectorClock = VectorClock()) { // vector clock version
+ members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address
+ overview: GossipOverview = GossipOverview(),
+ version: VectorClock = VectorClock()) { // vector clock version
if (Cluster.isAssertInvariantsEnabled) assertInvariants()
@@ -84,7 +84,7 @@ private[cluster] final case class Gossip(
}
@transient private lazy val membersMap: Map[UniqueAddress, Member] =
- members.map(m ⇒ m.uniqueAddress -> m)(collection.breakOut)
+ members.map(m ⇒ m.uniqueAddress → m)(collection.breakOut)
/**
* Increments the version for this 'Node'.
@@ -142,7 +142,8 @@ private[cluster] final case class Gossip(
val mergedMembers = Gossip.emptyMembers union Member.pickHighestPriority(this.members, that.members)
// 3. merge reachability table by picking records with highest version
- val mergedReachability = this.overview.reachability.merge(mergedMembers.map(_.uniqueAddress),
+ val mergedReachability = this.overview.reachability.merge(
+ mergedMembers.map(_.uniqueAddress),
that.overview.reachability)
// 4. Nobody can have seen this new gossip yet
@@ -200,7 +201,8 @@ private[cluster] final case class Gossip(
def isSingletonCluster: Boolean = members.size == 1
def member(node: UniqueAddress): Member = {
- membersMap.getOrElse(node,
+ membersMap.getOrElse(
+ node,
Member.removed(node)) // placeholder for removed member
}
@@ -227,8 +229,8 @@ private[cluster] final case class Gossip(
*/
@SerialVersionUID(1L)
private[cluster] final case class GossipOverview(
- seen: Set[UniqueAddress] = Set.empty,
- reachability: Reachability = Reachability.empty) {
+ seen: Set[UniqueAddress] = Set.empty,
+ reachability: Reachability = Reachability.empty) {
override def toString =
s"GossipOverview(reachability = [$reachability], seen = [${seen.mkString(", ")}])"
@@ -252,11 +254,11 @@ object GossipEnvelope {
*/
@SerialVersionUID(2L)
private[cluster] class GossipEnvelope private (
- val from: UniqueAddress,
- val to: UniqueAddress,
- @volatile var g: Gossip,
- serDeadline: Deadline,
- @transient @volatile var ser: () ⇒ Gossip) extends ClusterMessage {
+ val from: UniqueAddress,
+ val to: UniqueAddress,
+ @volatile var g: Gossip,
+ serDeadline: Deadline,
+ @transient @volatile var ser: () ⇒ Gossip) extends ClusterMessage {
def gossip: Gossip = {
deserialize()
diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala
index 3f55ef5317..bd3f817587 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Member.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala
@@ -15,10 +15,10 @@ import MemberStatus._
*/
@SerialVersionUID(1L)
class Member private[cluster] (
- val uniqueAddress: UniqueAddress,
+ val uniqueAddress: UniqueAddress,
private[cluster] val upNumber: Int, // INTERNAL API
- val status: MemberStatus,
- val roles: Set[String]) extends Serializable {
+ val status: MemberStatus,
+ val roles: Set[String]) extends Serializable {
def address: Address = uniqueAddress.address
@@ -53,7 +53,8 @@ class Member private[cluster] (
val oldStatus = this.status
if (status == oldStatus) this
else {
- require(allowedTransitions(oldStatus)(status),
+ require(
+ allowedTransitions(oldStatus)(status),
s"Invalid member status transition [ ${this} -> ${status}]")
new Member(uniqueAddress, upNumber, status, roles)
}
@@ -233,13 +234,13 @@ object MemberStatus {
*/
private[cluster] val allowedTransitions: Map[MemberStatus, Set[MemberStatus]] =
Map(
- Joining -> Set(WeaklyUp, Up, Down, Removed),
- WeaklyUp -> Set(Up, Down, Removed),
- Up -> Set(Leaving, Down, Removed),
- Leaving -> Set(Exiting, Down, Removed),
- Down -> Set(Removed),
- Exiting -> Set(Removed, Down),
- Removed -> Set.empty[MemberStatus])
+ Joining → Set(WeaklyUp, Up, Down, Removed),
+ WeaklyUp → Set(Up, Down, Removed),
+ Up → Set(Leaving, Down, Removed),
+ Leaving → Set(Exiting, Down, Removed),
+ Down → Set(Removed),
+ Exiting → Set(Removed, Down),
+ Removed → Set.empty[MemberStatus])
}
/**
diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala
index 57ced59f72..c978cf8e74 100644
--- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala
@@ -47,7 +47,7 @@ private[cluster] object Reachability {
*/
@SerialVersionUID(1L)
private[cluster] class Reachability private (
- val records: immutable.IndexedSeq[Reachability.Record],
+ val records: immutable.IndexedSeq[Reachability.Record],
val versions: Map[UniqueAddress, Long]) extends Serializable {
import Reachability._
@@ -67,10 +67,10 @@ private[cluster] class Reachability private (
records foreach { r ⇒
val m = mapBuilder.get(r.observer) match {
- case None ⇒ Map(r.subject -> r)
+ case None ⇒ Map(r.subject → r)
case Some(m) ⇒ m.updated(r.subject, r)
}
- mapBuilder += (r.observer -> m)
+ mapBuilder += (r.observer → m)
if (r.status == Unreachable) unreachableBuilder += r.subject
else if (r.status == Terminated) terminatedBuilder += r.subject
@@ -167,7 +167,7 @@ private[cluster] class Reachability private (
}
if (observerVersion2 > observerVersion1)
- newVersions += (observer -> observerVersion2)
+ newVersions += (observer → observerVersion2)
}
newVersions = newVersions.filterNot { case (k, _) ⇒ !allowed(k) }
@@ -242,7 +242,7 @@ private[cluster] class Reachability private (
case (subject, records) if records.exists(_.status == Unreachable) ⇒
val observers: Set[UniqueAddress] =
records.collect { case r if r.status == Unreachable ⇒ r.observer }(breakOut)
- (subject -> observers)
+ (subject → observers)
}
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala
index 8318e126ce..78921cc0f3 100644
--- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala
@@ -37,27 +37,28 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri
private lazy val GossipTimeToLive = Cluster(system).settings.GossipTimeToLive
private val fromBinaryMap = collection.immutable.HashMap[Class[_ <: ClusterMessage], Array[Byte] ⇒ AnyRef](
- classOf[InternalClusterAction.Join] -> {
+ classOf[InternalClusterAction.Join] → {
case bytes ⇒
val m = cm.Join.parseFrom(bytes)
- InternalClusterAction.Join(uniqueAddressFromProto(m.getNode),
+ InternalClusterAction.Join(
+ uniqueAddressFromProto(m.getNode),
Set.empty[String] ++ m.getRolesList.asScala)
},
- classOf[InternalClusterAction.Welcome] -> {
+ classOf[InternalClusterAction.Welcome] → {
case bytes ⇒
val m = cm.Welcome.parseFrom(decompress(bytes))
InternalClusterAction.Welcome(uniqueAddressFromProto(m.getFrom), gossipFromProto(m.getGossip))
},
- classOf[ClusterUserAction.Leave] -> (bytes ⇒ ClusterUserAction.Leave(addressFromBinary(bytes))),
- classOf[ClusterUserAction.Down] -> (bytes ⇒ ClusterUserAction.Down(addressFromBinary(bytes))),
- InternalClusterAction.InitJoin.getClass -> (_ ⇒ InternalClusterAction.InitJoin),
- classOf[InternalClusterAction.InitJoinAck] -> (bytes ⇒ InternalClusterAction.InitJoinAck(addressFromBinary(bytes))),
- classOf[InternalClusterAction.InitJoinNack] -> (bytes ⇒ InternalClusterAction.InitJoinNack(addressFromBinary(bytes))),
- classOf[ClusterHeartbeatSender.Heartbeat] -> (bytes ⇒ ClusterHeartbeatSender.Heartbeat(addressFromBinary(bytes))),
- classOf[ClusterHeartbeatSender.HeartbeatRsp] -> (bytes ⇒ ClusterHeartbeatSender.HeartbeatRsp(uniqueAddressFromBinary(bytes))),
- classOf[GossipStatus] -> gossipStatusFromBinary,
- classOf[GossipEnvelope] -> gossipEnvelopeFromBinary,
- classOf[MetricsGossipEnvelope] -> metricsGossipEnvelopeFromBinary)
+ classOf[ClusterUserAction.Leave] → (bytes ⇒ ClusterUserAction.Leave(addressFromBinary(bytes))),
+ classOf[ClusterUserAction.Down] → (bytes ⇒ ClusterUserAction.Down(addressFromBinary(bytes))),
+ InternalClusterAction.InitJoin.getClass → (_ ⇒ InternalClusterAction.InitJoin),
+ classOf[InternalClusterAction.InitJoinAck] → (bytes ⇒ InternalClusterAction.InitJoinAck(addressFromBinary(bytes))),
+ classOf[InternalClusterAction.InitJoinNack] → (bytes ⇒ InternalClusterAction.InitJoinNack(addressFromBinary(bytes))),
+ classOf[ClusterHeartbeatSender.Heartbeat] → (bytes ⇒ ClusterHeartbeatSender.Heartbeat(addressFromBinary(bytes))),
+ classOf[ClusterHeartbeatSender.HeartbeatRsp] → (bytes ⇒ ClusterHeartbeatSender.HeartbeatRsp(uniqueAddressFromBinary(bytes))),
+ classOf[GossipStatus] → gossipStatusFromBinary,
+ classOf[GossipEnvelope] → gossipEnvelopeFromBinary,
+ classOf[MetricsGossipEnvelope] → metricsGossipEnvelopeFromBinary)
def includeManifest: Boolean = true
@@ -164,20 +165,20 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri
UniqueAddress(addressFromProto(uniqueAddress.getAddress), uniqueAddress.getUid)
private val memberStatusToInt = scala.collection.immutable.HashMap[MemberStatus, Int](
- MemberStatus.Joining -> cm.MemberStatus.Joining_VALUE,
- MemberStatus.Up -> cm.MemberStatus.Up_VALUE,
- MemberStatus.Leaving -> cm.MemberStatus.Leaving_VALUE,
- MemberStatus.Exiting -> cm.MemberStatus.Exiting_VALUE,
- MemberStatus.Down -> cm.MemberStatus.Down_VALUE,
- MemberStatus.Removed -> cm.MemberStatus.Removed_VALUE,
- MemberStatus.WeaklyUp -> cm.MemberStatus.WeaklyUp_VALUE)
+ MemberStatus.Joining → cm.MemberStatus.Joining_VALUE,
+ MemberStatus.Up → cm.MemberStatus.Up_VALUE,
+ MemberStatus.Leaving → cm.MemberStatus.Leaving_VALUE,
+ MemberStatus.Exiting → cm.MemberStatus.Exiting_VALUE,
+ MemberStatus.Down → cm.MemberStatus.Down_VALUE,
+ MemberStatus.Removed → cm.MemberStatus.Removed_VALUE,
+ MemberStatus.WeaklyUp → cm.MemberStatus.WeaklyUp_VALUE)
private val memberStatusFromInt = memberStatusToInt.map { case (a, b) ⇒ (b, a) }
private val reachabilityStatusToInt = scala.collection.immutable.HashMap[Reachability.ReachabilityStatus, Int](
- Reachability.Reachable -> cm.ReachabilityStatus.Reachable_VALUE,
- Reachability.Unreachable -> cm.ReachabilityStatus.Unreachable_VALUE,
- Reachability.Terminated -> cm.ReachabilityStatus.Terminated_VALUE)
+ Reachability.Reachable → cm.ReachabilityStatus.Reachable_VALUE,
+ Reachability.Unreachable → cm.ReachabilityStatus.Unreachable_VALUE,
+ Reachability.Terminated → cm.ReachabilityStatus.Terminated_VALUE)
private val reachabilityStatusFromInt = reachabilityStatusToInt.map { case (a, b) ⇒ (b, a) }
@@ -310,7 +311,8 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri
}
private def gossipStatusFromProto(status: cm.GossipStatus): GossipStatus =
- GossipStatus(uniqueAddressFromProto(status.getFrom), vectorClockFromProto(status.getVersion,
+ GossipStatus(uniqueAddressFromProto(status.getFrom), vectorClockFromProto(
+ status.getVersion,
status.getAllHashesList.asScala.toVector))
private def metricsGossipEnvelopeToProto(envelope: MetricsGossipEnvelope): cm.MetricsGossipEnvelope = {
@@ -381,7 +383,8 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri
case NumberType.Float_VALUE ⇒ jl.Float.intBitsToFloat(number.getValue32)
case NumberType.Integer_VALUE ⇒ number.getValue32
case NumberType.Serialized_VALUE ⇒
- val in = new ClassLoaderObjectInputStream(system.dynamicAccess.classLoader,
+ val in = new ClassLoaderObjectInputStream(
+ system.dynamicAccess.classLoader,
new ByteArrayInputStream(number.getSerialized.toByteArray))
val obj = in.readObject
in.close()
diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala
index bc326938bc..eafe050647 100644
--- a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala
@@ -131,15 +131,16 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS
@SerialVersionUID(1L)
@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class AdaptiveLoadBalancingPool(
- metricsSelector: MetricsSelector = MixMetricsSelector,
- override val nrOfInstances: Int = 0,
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ override val nrOfInstances: Int = 0,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
- override val usePoolDispatcher: Boolean = false)
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
extends Pool {
def this(config: Config, dynamicAccess: DynamicAccess) =
- this(nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
+ this(
+ nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config),
metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
usePoolDispatcher = config.hasPath("pool-dispatcher"))
@@ -159,7 +160,8 @@ final case class AdaptiveLoadBalancingPool(
new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector))
override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
- Some(Props(classOf[AdaptiveLoadBalancingMetricsListener],
+ Some(Props(
+ classOf[AdaptiveLoadBalancingMetricsListener],
routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
/**
@@ -212,13 +214,14 @@ final case class AdaptiveLoadBalancingPool(
@SerialVersionUID(1L)
@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class AdaptiveLoadBalancingGroup(
- metricsSelector: MetricsSelector = MixMetricsSelector,
- override val paths: immutable.Iterable[String] = Nil,
- override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ override val paths: immutable.Iterable[String] = Nil,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config, dynamicAccess: DynamicAccess) =
- this(metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
+ this(
+ metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
paths = immutableSeq(config.getStringList("routees.paths")))
/**
@@ -228,8 +231,9 @@ final case class AdaptiveLoadBalancingGroup(
* @param routeesPaths string representation of the actor paths of the routees, messages are
* sent with [[akka.actor.ActorSelection]] to these paths
*/
- def this(metricsSelector: MetricsSelector,
- routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths))
+ def this(
+ metricsSelector: MetricsSelector,
+ routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths))
override def paths(system: ActorSystem): immutable.Iterable[String] = this.paths
@@ -237,7 +241,8 @@ final case class AdaptiveLoadBalancingGroup(
new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector))
override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
- Some(Props(classOf[AdaptiveLoadBalancingMetricsListener],
+ Some(Props(
+ classOf[AdaptiveLoadBalancingMetricsListener],
routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
/**
@@ -363,9 +368,9 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe
combined.foldLeft(Map.empty[Address, (Double, Int)].withDefaultValue((0.0, 0))) {
case (acc, (address, capacity)) ⇒
val (sum, count) = acc(address)
- acc + (address -> ((sum + capacity, count + 1)))
+ acc + (address → ((sum + capacity, count + 1)))
}.map {
- case (addr, (sum, count)) ⇒ (addr -> sum / count)
+ case (addr, (sum, count)) ⇒ addr → (sum / count)
}
}
@@ -380,7 +385,7 @@ object MetricsSelector {
case "cpu" ⇒ CpuMetricsSelector
case "load" ⇒ SystemLoadAverageMetricsSelector
case fqn ⇒
- val args = List(classOf[Config] -> config)
+ val args = List(classOf[Config] → config)
dynamicAccess.createInstanceFor[MetricsSelector](fqn, args).recover({
case exception ⇒ throw new IllegalArgumentException(
(s"Cannot instantiate metrics-selector [$fqn], " +
@@ -430,7 +435,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector {
val (_, min) = capacity.minBy { case (_, c) ⇒ c }
// lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero
val divisor = math.max(0.01, min)
- capacity map { case (addr, c) ⇒ (addr -> math.round((c) / divisor).toInt) }
+ capacity map { case (addr, c) ⇒ (addr → math.round((c) / divisor).toInt) }
}
}
diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala
index 99e42cc056..4a1f556d13 100644
--- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala
@@ -43,10 +43,10 @@ object ClusterRouterGroupSettings {
*/
@SerialVersionUID(1L)
final case class ClusterRouterGroupSettings(
- totalInstances: Int,
- routeesPaths: immutable.Seq[String],
+ totalInstances: Int,
+ routeesPaths: immutable.Seq[String],
allowLocalRoutees: Boolean,
- useRole: Option[String]) extends ClusterRouterSettingsBase {
+ useRole: Option[String]) extends ClusterRouterSettingsBase {
/**
* Java API
@@ -82,10 +82,10 @@ object ClusterRouterPoolSettings {
*/
@SerialVersionUID(1L)
final case class ClusterRouterPoolSettings(
- totalInstances: Int,
+ totalInstances: Int,
maxInstancesPerNode: Int,
- allowLocalRoutees: Boolean,
- useRole: Option[String]) extends ClusterRouterSettingsBase {
+ allowLocalRoutees: Boolean,
+ useRole: Option[String]) extends ClusterRouterSettingsBase {
/**
* Java API
@@ -276,9 +276,9 @@ private[akka] class ClusterRouterPoolActor(
} else {
// find the node with least routees
val numberOfRouteesPerNode: Map[Address, Int] =
- currentRoutees.foldLeft(currentNodes.map(_ -> 0).toMap.withDefaultValue(0)) { (acc, x) ⇒
+ currentRoutees.foldLeft(currentNodes.map(_ → 0).toMap.withDefaultValue(0)) { (acc, x) ⇒
val address = fullAddress(x)
- acc + (address -> (acc(address) + 1))
+ acc + (address → (acc(address) + 1))
}
val (address, count) = numberOfRouteesPerNode.minBy(_._2)
@@ -304,7 +304,7 @@ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSett
var usedRouteePaths: Map[Address, Set[String]] =
if (settings.allowLocalRoutees)
- Map(cluster.selfAddress -> settings.routeesPaths.toSet)
+ Map(cluster.selfAddress → settings.routeesPaths.toSet)
else
Map.empty
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala
index 4a098f49a3..eb81ea9c77 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala
@@ -39,7 +39,7 @@ abstract class DeterministicOldestWhenJoiningSpec
// reverse order because that expose the bug in issue #18554
def seedNodes: immutable.IndexedSeq[Address] =
Vector(address(seed1), address(seed2), address(seed3)).sorted(Member.addressOrdering).reverse
- val roleByAddress = Map(address(seed1) -> seed1, address(seed2) -> seed2, address(seed3) -> seed3)
+ val roleByAddress = Map(address(seed1) → seed1, address(seed2) → seed2, address(seed3) → seed3)
"Joining a cluster" must {
"result in deterministic oldest node" taggedAs LongRunningTest in {
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
index fd125879cb..064ec57774 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
@@ -96,7 +96,8 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
def muteLog(sys: ActorSystem = system): Unit = {
if (!sys.log.isDebugEnabled) {
- Seq(".*Metrics collection has started successfully.*",
+ Seq(
+ ".*Metrics collection has started successfully.*",
".*Metrics will be retreived from MBeans.*",
".*Cluster Node.* - registered cluster JMX MBean.*",
".*Cluster Node.* - is starting up.*",
@@ -272,7 +273,8 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
val expectedLeader = roleOfLeader(nodesInCluster)
val leader = clusterView.leader
val isLeader = leader == Some(clusterView.selfAddress)
- assert(isLeader == isNode(expectedLeader),
+ assert(
+ isLeader == isNode(expectedLeader),
"expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members))
clusterView.status should (be(MemberStatus.Up) or be(MemberStatus.Leaving))
}
@@ -282,9 +284,9 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
* Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring.
*/
def awaitMembersUp(
- numberOfMembers: Int,
- canNotBePartOfMemberRing: Set[Address] = Set.empty,
- timeout: FiniteDuration = 25.seconds): Unit = {
+ numberOfMembers: Int,
+ canNotBePartOfMemberRing: Set[Address] = Set.empty,
+ timeout: FiniteDuration = 25.seconds): Unit = {
within(timeout) {
if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set
awaitAssert(canNotBePartOfMemberRing foreach (a ⇒ clusterView.members.map(_.address) should not contain (a)))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala
index 9ba4b6828d..14ecede33a 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala
@@ -50,7 +50,8 @@ abstract class RestartFirstSeedNodeSpec
def missingSeed = address(seed3).copy(port = Some(61313))
def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2, seed3, missingSeed)
- lazy val restartedSeed1System = ActorSystem(system.name,
+ lazy val restartedSeed1System = ActorSystem(
+ system.name,
ConfigFactory.parseString("akka.remote.netty.tcp.port=" + seedNodes.head.port.get).
withFallback(system.settings.config))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala
index b01d2507cd..6489a9abd4 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala
@@ -52,7 +52,8 @@ abstract class RestartNode2SpecSpec
def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2)
// this is the node that will attempt to re-join, keep gate times low so it can retry quickly
- lazy val restartedSeed1System = ActorSystem(system.name,
+ lazy val restartedSeed1System = ActorSystem(
+ system.name,
ConfigFactory.parseString(
s"""
akka.remote.netty.tcp.port= ${seedNodes.head.port.get}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala
index 7d4b4930b2..58e8a42fbc 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala
@@ -48,7 +48,8 @@ abstract class RestartNode3Spec
def seedNodes: immutable.IndexedSeq[Address] = Vector(first)
- lazy val restartedSecondSystem = ActorSystem(system.name,
+ lazy val restartedSecondSystem = ActorSystem(
+ system.name,
ConfigFactory.parseString("akka.remote.netty.tcp.port=" + secondUniqueAddress.address.port.get).
withFallback(system.settings.config))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala
index c6f3efe285..17cad3e2b6 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala
@@ -69,7 +69,8 @@ abstract class RestartNodeSpec
def seedNodes: immutable.IndexedSeq[Address] = Vector(first, secondUniqueAddress.address, third)
- lazy val restartedSecondSystem = ActorSystem(system.name,
+ lazy val restartedSecondSystem = ActorSystem(
+ system.name,
ConfigFactory.parseString("akka.remote.netty.tcp.port=" + secondUniqueAddress.address.port.get).
withFallback(system.settings.config))
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
index ac0539c2e7..1551253ed0 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
@@ -208,13 +208,15 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
val convergenceWithinFactor = getDouble("convergence-within-factor")
val exerciseActors = getBoolean("exercise-actors")
- require(numberOfSeedNodes + numberOfNodesJoiningToSeedNodesInitially + numberOfNodesJoiningOneByOneSmall +
- numberOfNodesJoiningOneByOneLarge + numberOfNodesJoiningToOneNode + numberOfNodesJoiningToSeedNodes <= totalNumberOfNodes,
+ require(
+ numberOfSeedNodes + numberOfNodesJoiningToSeedNodesInitially + numberOfNodesJoiningOneByOneSmall +
+ numberOfNodesJoiningOneByOneLarge + numberOfNodesJoiningToOneNode + numberOfNodesJoiningToSeedNodes <= totalNumberOfNodes,
s"specified number of joining nodes <= ${totalNumberOfNodes}")
// don't shutdown the 3 nodes hosting the master actors
- require(numberOfNodesLeavingOneByOneSmall + numberOfNodesLeavingOneByOneLarge + numberOfNodesLeaving +
- numberOfNodesShutdownOneByOneSmall + numberOfNodesShutdownOneByOneLarge + numberOfNodesShutdown <= totalNumberOfNodes - 3,
+ require(
+ numberOfNodesLeavingOneByOneSmall + numberOfNodesLeavingOneByOneLarge + numberOfNodesLeaving +
+ numberOfNodesShutdownOneByOneSmall + numberOfNodesShutdownOneByOneLarge + numberOfNodesShutdown <= totalNumberOfNodes - 3,
s"specified number of leaving/shutdown nodes <= ${totalNumberOfNodes - 3}")
require(numberOfNodesJoinRemove <= totalNumberOfNodes, s"nr-of-nodes-join-remove should be <= ${totalNumberOfNodes}")
@@ -229,8 +231,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
}
final case class ClusterResult(
- address: Address,
- duration: Duration,
+ address: Address,
+ duration: Duration,
clusterStats: GossipStats)
final case class AggregatedClusterResult(title: String, duration: Duration, clusterStats: GossipStats)
@@ -271,8 +273,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
def receive = {
case ClusterMetricsChanged(clusterMetrics) ⇒ nodeMetrics = clusterMetrics
- case PhiResult(from, phiValues) ⇒ phiValuesObservedByNode += from -> phiValues
- case StatsResult(from, stats) ⇒ clusterStatsObservedByNode += from -> stats
+ case PhiResult(from, phiValues) ⇒ phiValuesObservedByNode += from → phiValues
+ case StatsResult(from, stats) ⇒ clusterStatsObservedByNode += from → stats
case ReportTick ⇒
if (infolog)
log.info(s"[${title}] in progress\n${formatMetrics}\n\n${formatPhi}\n\n${formatStats}")
@@ -412,7 +414,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
val φ = phi(node)
if (φ > 0 || cluster.failureDetector.isMonitoring(node)) {
val aboveOne = if (!φ.isInfinite && φ > 1.0) 1 else 0
- phiByNode += node -> PhiValue(node, previous.countAboveOne + aboveOne, previous.count + 1,
+ phiByNode += node → PhiValue(node, previous.countAboveOne + aboveOne, previous.count + 1,
math.max(previous.max, φ))
}
}
@@ -561,7 +563,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
}
def send(job: Job): Unit = {
- outstanding += job.id -> JobState(Deadline.now + retryTimeout, job)
+ outstanding += job.id → JobState(Deadline.now + retryTimeout, job)
sendCounter += 1
workers ! job
}
@@ -576,7 +578,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
case TreeJob(id, payload, idx, levels, width) ⇒
// create the actors when first TreeJob message is received
val totalActors = ((width * math.pow(width, levels) - 1) / (width - 1)).toInt
- log.debug("Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children",
+ log.debug(
+ "Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children",
totalActors, levels, width)
val tree = context.actorOf(Props(classOf[TreeNode], levels, width), "tree")
tree forward ((idx, SimpleJob(id, payload)))
@@ -633,7 +636,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
case e: Exception ⇒ context.children foreach { _ ! e }
case GetChildrenCount ⇒ sender() ! ChildrenCount(context.children.size, restartCount)
case Reset ⇒
- require(context.children.isEmpty,
+ require(
+ context.children.isEmpty,
s"ResetChildrenCount not allowed when children exists, [${context.children.size}]")
restartCount = 0
}
@@ -772,7 +776,8 @@ abstract class StressSpec
def createResultAggregator(title: String, expectedResults: Int, includeInHistory: Boolean): Unit = {
runOn(roles.head) {
- val aggregator = system.actorOf(Props(classOf[ClusterResultAggregator], title, expectedResults, settings).withDeploy(Deploy.local),
+ val aggregator = system.actorOf(
+ Props(classOf[ClusterResultAggregator], title, expectedResults, settings).withDeploy(Deploy.local),
name = "result" + step)
if (includeInHistory && infolog) aggregator ! ReportTo(Some(clusterResultHistory))
else aggregator ! ReportTo(None)
@@ -1027,7 +1032,8 @@ abstract class StressSpec
val (masterRoles, otherRoles) = roles.take(nbrUsedRoles).splitAt(3)
runOn(masterRoles: _*) {
reportResult {
- val m = system.actorOf(Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local),
+ val m = system.actorOf(
+ Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local),
name = masterName)
m ! Begin
import system.dispatcher
@@ -1155,7 +1161,8 @@ abstract class StressSpec
"start routers that are running while nodes are joining" taggedAs LongRunningTest in {
runOn(roles.take(3): _*) {
- system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
+ system.actorOf(
+ Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
name = masterName) ! Begin
}
}
@@ -1253,7 +1260,8 @@ abstract class StressSpec
"start routers that are running while nodes are removed" taggedAs LongRunningTest in {
if (exerciseActors) {
runOn(roles.take(3): _*) {
- system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
+ system.actorOf(
+ Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
name = masterName) ! Begin
}
}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala
index aae1a13dc2..5584b89b49 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala
@@ -99,11 +99,11 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa
Await.result(router ? GetRoutees, timeout.duration).asInstanceOf[Routees].routees
def receiveReplies(expectedReplies: Int): Map[Address, Int] = {
- val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0)
+ val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0)
(receiveWhile(5 seconds, messages = expectedReplies) {
case Reply(address) ⇒ address
}).foldLeft(zero) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
}
@@ -116,10 +116,11 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa
}
def startRouter(name: String): ActorRef = {
- val router = system.actorOf(ClusterRouterPool(
- local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
- settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
- props(Props[Echo]),
+ val router = system.actorOf(
+ ClusterRouterPool(
+ local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
+ settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
+ props(Props[Echo]),
name)
// it may take some time until router receives cluster member events
awaitAssert { currentRoutees(router).size should ===(roles.size) }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala
index 2d54c6e176..5425f4d2ce 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala
@@ -73,8 +73,10 @@ abstract class ClusterConsistentHashingGroupSpec extends MultiNodeSpec(ClusterCo
case s: String ⇒ s
}
val paths = List("/user/dest")
- val router = system.actorOf(ClusterRouterGroup(local = ConsistentHashingGroup(paths, hashMapping = hashMapping),
- settings = ClusterRouterGroupSettings(totalInstances = 10, paths, allowLocalRoutees = true, useRole = None)).props(),
+ val router = system.actorOf(
+ ClusterRouterGroup(
+ local = ConsistentHashingGroup(paths, hashMapping = hashMapping),
+ settings = ClusterRouterGroupSettings(totalInstances = 10, paths, allowLocalRoutees = true, useRole = None)).props(),
"router")
// it may take some time until router receives cluster member events
awaitAssert { currentRoutees(router).size should ===(3) }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala
index d68a50d581..e26e2e5ba9 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala
@@ -121,9 +121,11 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC
"deploy programatically defined routees to the member nodes in the cluster" taggedAs LongRunningTest in {
runOn(first) {
- val router2 = system.actorOf(ClusterRouterPool(local = ConsistentHashingPool(nrOfInstances = 0),
- settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = None)).
- props(Props[Echo]),
+ val router2 = system.actorOf(
+ ClusterRouterPool(
+ local = ConsistentHashingPool(nrOfInstances = 0),
+ settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = None)).
+ props(Props[Echo]),
"router2")
// it may take some time until router receives cluster member events
awaitAssert { currentRoutees(router2).size should ===(6) }
@@ -154,10 +156,11 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC
case s: String ⇒ s
}
- val router4 = system.actorOf(ClusterRouterPool(
- local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping),
- settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
- props(Props[Echo]),
+ val router4 = system.actorOf(
+ ClusterRouterPool(
+ local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping),
+ settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
+ props(Props[Echo]),
"router4")
assertHashMapping(router4)
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala
index bebd5ce25f..b11f96d531 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala
@@ -103,20 +103,22 @@ abstract class ClusterRoundRobinSpec extends MultiNodeSpec(ClusterRoundRobinMult
import ClusterRoundRobinMultiJvmSpec._
lazy val router1 = system.actorOf(FromConfig.props(Props[SomeActor]), "router1")
- lazy val router2 = system.actorOf(ClusterRouterPool(RoundRobinPool(nrOfInstances = 0),
- ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
- props(Props[SomeActor]),
+ lazy val router2 = system.actorOf(
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 0),
+ ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
+ props(Props[SomeActor]),
"router2")
lazy val router3 = system.actorOf(FromConfig.props(Props[SomeActor]), "router3")
lazy val router4 = system.actorOf(FromConfig.props(), "router4")
lazy val router5 = system.actorOf(RoundRobinPool(nrOfInstances = 0).props(Props[SomeActor]), "router5")
def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = {
- val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0)
+ val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0)
(receiveWhile(5 seconds, messages = expectedReplies) {
case Reply(`routeeType`, ref) ⇒ fullAddress(ref)
}).foldLeft(zero) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
}
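
Note: the fold in `receiveReplies` (rewritten above only to use the `→` alias) is a per-address occurrence count over a zero-seeded map. The same shape standalone, with hypothetical role names:

    // Count occurrences per key, starting from a map that holds 0 for every role.
    val roles = Seq("first", "second", "third")
    val zero = Map.empty[String, Int] ++ roles.map(_ → 0)
    val replies = Seq("first", "third", "first")
    val counts = replies.foldLeft(zero) {
      case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
    }
    // counts == Map("first" → 2, "second" → 0, "third" → 1)
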
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala
index f7732e6bf7..a96c8f51a3 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala
@@ -63,11 +63,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
import akka.cluster.routing.UseRoleIgnoredMultiJvmSpec._
def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = {
- val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0)
+ val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0)
(receiveWhile(5 seconds, messages = expectedReplies) {
case Reply(`routeeType`, ref) ⇒ fullAddress(ref)
}).foldLeft(zero) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
}
@@ -101,10 +101,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("b")
- val router = system.actorOf(ClusterRouterPool(
- RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = false, useRole = role)).
- props(Props[SomeActor]),
+ val router = system.actorOf(
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = false, useRole = role)).
+ props(Props[SomeActor]),
"router-2")
awaitAssert(currentRoutees(router).size should ===(4))
@@ -130,7 +131,8 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("b")
- val router = system.actorOf(ClusterRouterGroup(
+ val router = system.actorOf(
+ ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = false, useRole = role)).props,
@@ -159,10 +161,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("b")
- val router = system.actorOf(ClusterRouterPool(
- RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)).
- props(Props[SomeActor]),
+ val router = system.actorOf(
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)).
+ props(Props[SomeActor]),
"router-3")
awaitAssert(currentRoutees(router).size should ===(4))
@@ -188,7 +191,8 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("b")
- val router = system.actorOf(ClusterRouterGroup(
+ val router = system.actorOf(
+ ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRole = role)).props,
@@ -217,10 +221,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("a")
- val router = system.actorOf(ClusterRouterPool(
- RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)).
- props(Props[SomeActor]),
+ val router = system.actorOf(
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)).
+ props(Props[SomeActor]),
"router-4")
awaitAssert(currentRoutees(router).size should ===(2))
@@ -246,7 +251,8 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("a")
- val router = system.actorOf(ClusterRouterGroup(
+ val router = system.actorOf(
+ ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRole = role)).props,
@@ -275,10 +281,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("c")
- val router = system.actorOf(ClusterRouterPool(
- RoundRobinPool(nrOfInstances = 6),
- ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)).
- props(Props[SomeActor]),
+ val router = system.actorOf(
+ ClusterRouterPool(
+ RoundRobinPool(nrOfInstances = 6),
+ ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRole = role)).
+ props(Props[SomeActor]),
"router-5")
awaitAssert(currentRoutees(router).size should ===(6))
@@ -304,7 +311,8 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp
runOn(first) {
val role = Some("c")
- val router = system.actorOf(ClusterRouterGroup(
+ val router = system.actorOf(
+ ClusterRouterGroup(
RoundRobinGroup(paths = Nil),
ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"),
allowLocalRoutees = true, useRole = role)).props,
diff --git a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala
index fb2a9c9359..f9bea50285 100644
--- a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala
@@ -22,7 +22,7 @@ object AutoDownSpec {
class AutoDownTestActor(
autoDownUnreachableAfter: FiniteDuration,
- probe: ActorRef)
+ probe: ActorRef)
extends AutoDownBase(autoDownUnreachableAfter) {
override def selfAddress = memberA.address
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala
index f8072dcd98..0a0db52a6f 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala
@@ -134,14 +134,16 @@ class ClusterDomainEventSpec extends WordSpec with Matchers {
val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, dLeaving, eJoining))
val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining))
diffRolesLeader(g0, g1, selfDummyAddress) should ===(
- Set(RoleLeaderChanged("AA", Some(aUp.address)),
+ Set(
+ RoleLeaderChanged("AA", Some(aUp.address)),
RoleLeaderChanged("AB", Some(aUp.address)),
RoleLeaderChanged("BB", Some(bUp.address)),
RoleLeaderChanged("DD", Some(dLeaving.address)),
RoleLeaderChanged("DE", Some(dLeaving.address)),
RoleLeaderChanged("EE", Some(eUp.address))))
diffRolesLeader(g1, g2, selfDummyAddress) should ===(
- Set(RoleLeaderChanged("AA", None),
+ Set(
+ RoleLeaderChanged("AA", None),
RoleLeaderChanged("AB", Some(bUp.address)),
RoleLeaderChanged("DE", Some(eJoining.address))))
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala
index 8d02ea0867..3c30f72232 100644
--- a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala
@@ -93,7 +93,7 @@ class EWMASpec extends AkkaSpec(MetricsEnabledSpec.config) with MetricsCollector
} else None
}
}
- streamingDataSet ++= changes.map(m ⇒ m.name -> m)
+ streamingDataSet ++= changes.map(m ⇒ m.name → m)
}
}
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala
index e83da0971e..8414f91824 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala
@@ -98,7 +98,7 @@ class ReachabilitySpec extends WordSpec with Matchers {
Reachability.Record(nodeC, nodeB, Unreachable, 2),
Reachability.Record(nodeA, nodeD, Unreachable, 3),
Reachability.Record(nodeD, nodeB, Terminated, 4))
- val versions = Map(nodeA -> 3L, nodeC -> 3L, nodeD -> 4L)
+ val versions = Map(nodeA → 3L, nodeC → 3L, nodeD → 4L)
val r = Reachability(records, versions)
r.status(nodeA) should ===(Reachable)
r.status(nodeB) should ===(Terminated)
@@ -136,9 +136,9 @@ class ReachabilitySpec extends WordSpec with Matchers {
r.allUnreachableFrom(nodeD) should ===(Set(nodeA, nodeB))
r.observersGroupedByUnreachable should ===(Map(
- nodeA -> Set(nodeB, nodeC, nodeD),
- nodeB -> Set(nodeD),
- nodeE -> Set(nodeA)))
+ nodeA → Set(nodeB, nodeC, nodeD),
+ nodeB → Set(nodeD),
+ nodeE → Set(nodeA)))
}
"merge by picking latest version of each record" in {
@@ -199,11 +199,11 @@ class ReachabilitySpec extends WordSpec with Matchers {
}
"merge versions correctly" in {
- val r1 = Reachability(Vector.empty, Map(nodeA -> 3L, nodeB -> 5L, nodeC -> 7L))
- val r2 = Reachability(Vector.empty, Map(nodeA -> 6L, nodeB -> 2L, nodeD -> 1L))
+ val r1 = Reachability(Vector.empty, Map(nodeA → 3L, nodeB → 5L, nodeC → 7L))
+ val r2 = Reachability(Vector.empty, Map(nodeA → 6L, nodeB → 2L, nodeD → 1L))
val merged = r1.merge(Set(nodeA, nodeB, nodeC, nodeD, nodeE), r2)
- val expected = Map(nodeA -> 6L, nodeB -> 5L, nodeC -> 7L, nodeD -> 1L)
+ val expected = Map(nodeA → 6L, nodeB → 5L, nodeC → 7L, nodeD → 1L)
merged.versions should ===(expected)
val merged2 = r2.merge(Set(nodeA, nodeB, nodeC, nodeD, nodeE), r1)
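
Note: the expected `versions` map in this test is just the pointwise maximum of the two version tables. The same arithmetic standalone, with strings standing in for the node addresses:

    // Pointwise max of two version tables, matching the expectation above.
    val v1 = Map("A" → 3L, "B" → 5L, "C" → 7L)
    val v2 = Map("A" → 6L, "B" → 2L, "D" → 1L)
    val merged = (v1.keySet ++ v2.keySet).map { n ⇒
      n → (v1.getOrElse(n, 0L) max v2.getOrElse(n, 0L))
    }.toMap
    // merged == Map("A" → 6L, "B" → 5L, "C" → 7L, "D" → 1L)
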
diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
index a3daa63d7e..8ef6d7e938 100644
--- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
@@ -73,8 +73,10 @@ class ClusterMessageSerializerSpec extends AkkaSpec(
checkSerialization(InternalClusterAction.Welcome(uniqueAddress, g2))
- val mg = MetricsGossip(Set(NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))),
- NodeMetrics(b1.address, 4712, Set(Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))),
+ val mg = MetricsGossip(Set(
+ NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))),
+ NodeMetrics(b1.address, 4712, Set(
+ Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))),
Metric("bar1", Double.MinPositiveValue, None),
Metric("bar2", Float.MaxValue, None),
Metric("bar3", Int.MaxValue, None),
diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala
index f08c69e511..235958db52 100644
--- a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala
@@ -62,15 +62,15 @@ class MetricsSelectorSpec extends WordSpec with Matchers {
"CapacityMetricsSelector" must {
"calculate weights from capacity" in {
- val capacity = Map(a1 -> 0.6, b1 -> 0.3, c1 -> 0.1)
+ val capacity = Map(a1 → 0.6, b1 → 0.3, c1 → 0.1)
val weights = abstractSelector.weights(capacity)
- weights should ===(Map(c1 -> 1, b1 -> 3, a1 -> 6))
+ weights should ===(Map(c1 → 1, b1 → 3, a1 → 6))
}
"handle low and zero capacity" in {
- val capacity = Map(a1 -> 0.0, b1 -> 1.0, c1 -> 0.005, d1 -> 0.004)
+ val capacity = Map(a1 → 0.0, b1 → 1.0, c1 → 0.005, d1 → 0.004)
val weights = abstractSelector.weights(capacity)
- weights should ===(Map(a1 -> 0, b1 -> 100, c1 -> 1, d1 -> 0))
+ weights should ===(Map(a1 → 0, b1 → 100, c1 → 1, d1 → 0))
}
}
diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala
index e09923323c..224394fdae 100644
--- a/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala
@@ -30,7 +30,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString("""
"WeightedRoutees" must {
"allocate weighted routees" in {
- val weights = Map(a1 -> 1, b1 -> 3, c1 -> 10)
+ val weights = Map(a1 → 1, b1 → 3, c1 → 10)
val weighted = new WeightedRoutees(routees, a1, weights)
weighted(1) should ===(routeeA)
@@ -46,7 +46,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString("""
empty.total
}
- val empty2 = new WeightedRoutees(Vector(routeeA), a1, Map(a1 -> 0))
+ val empty2 = new WeightedRoutees(Vector(routeeA), a1, Map(a1 → 0))
empty2.isEmpty should ===(true)
intercept[IllegalArgumentException] {
empty2.total
@@ -66,7 +66,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString("""
}
"allocate routees for undefined weight" in {
- val weights = Map(a1 -> 1, b1 -> 7)
+ val weights = Map(a1 → 1, b1 → 7)
val weighted = new WeightedRoutees(routees, a1, weights)
weighted(1) should ===(routeeA)
@@ -77,7 +77,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString("""
}
"allocate weighted local routees" in {
- val weights = Map(a1 -> 2, b1 -> 1, c1 -> 10)
+ val weights = Map(a1 → 2, b1 → 1, c1 → 10)
val routees2 = Vector(testActorRoutee, routeeB, routeeC)
val weighted = new WeightedRoutees(routees2, a1, weights)
@@ -86,7 +86,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString("""
}
"not allocate ref with weight zero" in {
- val weights = Map(a1 -> 0, b1 -> 2, c1 -> 10)
+ val weights = Map(a1 → 0, b1 → 2, c1 → 10)
val weighted = new WeightedRoutees(routees, a1, weights)
1 to weighted.total foreach { weighted(_) should not be (routeeA) }
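
Note: taken together, the assertions in this spec pin down the allocation rule: with weights 1, 3, 10 there are 14 slots, slot 1 goes to the first routee, slots 2–4 to the second, slots 5–14 to the third, and a weight of 0 yields no slots. A standalone sketch of that cumulative-weight lookup (illustrative, not the Akka implementation):

    // Cumulative weight boundaries; a 1-based slot maps to the first routee
    // whose running total reaches it. Weight-0 entries never match a slot.
    val weights = Vector("a" → 1, "b" → 3, "c" → 10)
    val boundaries = weights.scanLeft(0) { case (acc, (_, w)) ⇒ acc + w }.tail // Vector(1, 4, 14)
    def routeeFor(slot: Int): String = weights(boundaries.indexWhere(_ >= slot))._1
    assert(routeeFor(1) == "a")
    assert((2 to 4).forall(s ⇒ routeeFor(s) == "b"))
    assert((5 to 14).forall(s ⇒ routeeFor(s) == "c"))
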
diff --git a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala
index 64e24e2054..9cd348c7a1 100644
--- a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala
+++ b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala
@@ -39,13 +39,14 @@ object CircuitBreakerProxy {
* @param failureMap function to map a failure into a response message. The failing response message is wrapped
* into a [[akka.contrib.circuitbreaker.CircuitBreakerProxy.CircuitOpenFailure]] object
*/
- def props(target: ActorRef,
- maxFailures: Int,
- callTimeout: Timeout,
- resetTimeout: Timeout,
- circuitEventListener: Option[ActorRef],
- failureDetector: Any ⇒ Boolean,
- failureMap: CircuitOpenFailure ⇒ Any) =
+ def props(
+ target: ActorRef,
+ maxFailures: Int,
+ callTimeout: Timeout,
+ resetTimeout: Timeout,
+ circuitEventListener: Option[ActorRef],
+ failureDetector: Any ⇒ Boolean,
+ failureMap: CircuitOpenFailure ⇒ Any) =
Props(new CircuitBreakerProxy(target, maxFailures, callTimeout, resetTimeout, circuitEventListener, failureDetector, failureMap))
sealed trait CircuitBreakerCommand
@@ -70,8 +71,8 @@ object CircuitBreakerProxy {
final case class CircuitBreakerPropsBuilder(
maxFailures: Int, callTimeout: Timeout, resetTimeout: Timeout,
- circuitEventListener: Option[ActorRef] = None,
- failureDetector: Any ⇒ Boolean = { _ ⇒ false },
+ circuitEventListener: Option[ActorRef] = None,
+ failureDetector: Any ⇒ Boolean = { _ ⇒ false },
openCircuitFailureConverter: CircuitOpenFailure ⇒ Any = identity) {
def withMaxFailures(value: Int) = copy(maxFailures = value)
@@ -100,15 +101,16 @@ object CircuitBreakerProxy {
import akka.contrib.circuitbreaker.CircuitBreakerProxy._
final class CircuitBreakerProxy(
- target: ActorRef,
- maxFailures: Int,
- callTimeout: Timeout,
- resetTimeout: Timeout,
+ target: ActorRef,
+ maxFailures: Int,
+ callTimeout: Timeout,
+ resetTimeout: Timeout,
circuitEventListener: Option[ActorRef],
- failureDetector: Any ⇒ Boolean,
- failureMap: CircuitOpenFailure ⇒ Any) extends Actor with ActorLogging with FSM[CircuitBreakerState, CircuitBreakerStateData] {
+ failureDetector: Any ⇒ Boolean,
+ failureMap: CircuitOpenFailure ⇒ Any) extends Actor with ActorLogging with FSM[CircuitBreakerState, CircuitBreakerStateData] {
import CircuitBreakerInternalEvents._
+ import FSM.`→`
context watch target
@@ -222,20 +224,23 @@ final class CircuitBreakerProxy(
val isFailure = failureDetector(response)
if (isFailure) {
- log.debug("Response '{}' is considered as failure sending self-message to ask incrementing failure count (origin state was {})",
+ log.debug(
+ "Response '{}' is considered as failure sending self-message to ask incrementing failure count (origin state was {})",
response, state)
self ! CallFailed
} else {
- log.debug("Request '{}' succeeded with response {}, returning response to sender {} and sending message to ask to reset failure count (origin state was {})",
+ log.debug(
+ "Request '{}' succeeded with response {}, returning response to sender {} and sending message to ask to reset failure count (origin state was {})",
message, response, currentSender, state)
self ! CallSucceeded
}
case Failure(reason) ⇒
- log.debug("Request '{}' to target {} failed with exception {}, sending self-message to ask incrementing failure count (origin state was {})",
+ log.debug(
+ "Request '{}' to target {} failed with exception {}, sending self-message to ask incrementing failure count (origin state was {})",
message, target, reason, state)
self ! CallFailed
@@ -243,15 +248,15 @@ final class CircuitBreakerProxy(
}
onTransition {
- case from -> Closed ⇒
+ case from → Closed ⇒
log.debug("Moving from state {} to state CLOSED", from)
circuitEventListener foreach { _ ! CircuitClosed(self) }
- case from -> HalfOpen ⇒
+ case from → HalfOpen ⇒
log.debug("Moving from state {} to state HALF OPEN", from)
circuitEventListener foreach { _ ! CircuitHalfOpen(self) }
- case from -> Open ⇒
+ case from → Open ⇒
log.debug("Moving from state {} to state OPEN", from)
circuitEventListener foreach { _ ! CircuitOpen(self) }
}
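
Note: the added `import FSM.`→`` is what makes the rewritten `onTransition` patterns compile: `from → Closed` is an extractor pattern, and only the ASCII `FSM.`->`` extractor is in scope by default. The same import appears in ReliableProxy and TimerBasedThrottler below for the same reason. A toy FSM showing the shape, with hypothetical states (not from this patch):

    import akka.actor.{ Actor, ActorLogging, FSM }

    sealed trait DoorState
    case object DoorOpen extends DoorState
    case object DoorClosed extends DoorState

    class Door extends Actor with ActorLogging with FSM[DoorState, Unit] {
      import FSM.`→` // brings the unicode-arrow extractor into scope

      startWith(DoorClosed, ())

      when(DoorClosed) { case Event("open", _) ⇒ goto(DoorOpen) }
      when(DoorOpen) { case Event("close", _) ⇒ goto(DoorClosed) }

      onTransition {
        case from → DoorOpen ⇒ log.debug("Moving from state {} to OPEN", from)
      }

      initialize()
    }
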
diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala
index dea049e2e4..963a5d1eb0 100644
--- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala
+++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala
@@ -227,6 +227,7 @@ import ReliableProxy._
class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration,
reconnectAfter: Option[FiniteDuration], maxConnectAttempts: Option[Int])
extends Actor with LoggingFSM[State, Vector[Message]] with ReliableProxyDebugLogging {
+ import FSM.`→`
var tunnel: ActorRef = _
var currentSerial: Int = 0
@@ -284,9 +285,9 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration,
}
onTransition {
- case _ -> Active ⇒ scheduleTick()
- case Active -> Idle ⇒ cancelTimer(resendTimer)
- case _ -> Connecting ⇒ scheduleReconnectTick()
+ case _ → Active ⇒ scheduleTick()
+ case Active → Idle ⇒ cancelTimer(resendTimer)
+ case _ → Connecting ⇒ scheduleReconnectTick()
}
when(Active) {
diff --git a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala
index 51b204b765..5ef9e3c70a 100644
--- a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala
+++ b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala
@@ -109,9 +109,10 @@ private[throttle] object TimerBasedThrottler {
final case class Message(message: Any, sender: ActorRef)
// The data of the FSM
- final case class Data(target: Option[ActorRef],
- callsLeftInThisPeriod: Int,
- queue: Q[Message])
+ final case class Data(
+ target: Option[ActorRef],
+ callsLeftInThisPeriod: Int,
+ queue: Q[Message])
}
/**
@@ -214,6 +215,8 @@ private[throttle] object TimerBasedThrottler {
* @see [[akka.contrib.throttle.Throttler]]
*/
class TimerBasedThrottler(var rate: Rate) extends Actor with FSM[State, Data] {
+ import FSM.`→`
+
startWith(Idle, Data(None, rate.numberOfCalls, Q()))
// Idle: no messages, or target not set
@@ -277,8 +280,8 @@ class TimerBasedThrottler(var rate: Rate) extends Actor with FSM[State, Data] {
}
onTransition {
- case Idle -> Active ⇒ startTimer(rate)
- case Active -> Idle ⇒ stopTimer()
+ case Idle → Active ⇒ startTimer(rate)
+ case Active → Idle ⇒ stopTimer()
}
initialize()
diff --git a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala
index 8ba86e42df..b98ee01f77 100644
--- a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala
@@ -21,8 +21,8 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen {
callTimeout = 200 millis,
resetTimeout = 1 second,
failureDetector = {
- _ == "FAILURE"
- })
+ _ == "FAILURE"
+ })
trait CircuitBreakerScenario {
val sender = TestProbe()
diff --git a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala
index 8e5dc3aac3..7edea90210 100644
--- a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala
@@ -64,11 +64,11 @@ class CircuitBreaker(potentiallyFailingService: ActorRef) extends Actor with Act
CircuitBreakerPropsBuilder(maxFailures = 3, callTimeout = 2.seconds, resetTimeout = 30.seconds)
.copy(
failureDetector = {
- _ match {
- case Response(Left(_)) ⇒ true
- case _ ⇒ false
- }
- })
+ _ match {
+ case Response(Left(_)) ⇒ true
+ case _ ⇒ false
+ }
+ })
.props(potentiallyFailingService),
"serviceCircuitBreaker")
@@ -106,15 +106,15 @@ class CircuitBreakerAsk(potentiallyFailingService: ActorRef) extends Actor with
CircuitBreakerPropsBuilder(maxFailures = 3, callTimeout = askTimeout, resetTimeout = 30.seconds)
.copy(
failureDetector = {
- _ match {
- case Response(Left(_)) ⇒ true
- case _ ⇒ false
- }
- })
+ _ match {
+ case Response(Left(_)) ⇒ true
+ case _ ⇒ false
+ }
+ })
.copy(
openCircuitFailureConverter = { failure ⇒
- Left(s"Circuit open when processing ${failure.failedMsg}")
- })
+ Left(s"Circuit open when processing ${failure.failedMsg}")
+ })
.props(potentiallyFailingService),
"serviceCircuitBreaker")
diff --git a/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala b/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala
index 2fdbcc5c6e..065480f9b1 100644
--- a/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala
@@ -121,7 +121,8 @@ object MyApp extends App {
}
"""))
- val myActor = system.actorOf(Props[MyActor].withDispatcher("peek-dispatcher"),
+ val myActor = system.actorOf(
+ Props[MyActor].withDispatcher("peek-dispatcher"),
name = "myActor")
myActor ! "Hello"
diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala
index 31a826f587..0e7b968da4 100644
--- a/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala
@@ -26,8 +26,9 @@ case object MoneyMarket extends AccountType
final case class GetCustomerAccountBalances(id: Long, accountTypes: Set[AccountType])
final case class GetAccountBalances(id: Long)
-final case class AccountBalances(accountType: AccountType,
- balance: Option[List[(Long, BigDecimal)]])
+final case class AccountBalances(
+ accountType: AccountType,
+ balance: Option[List[(Long, BigDecimal)]])
final case class CheckingAccountBalances(balances: Option[List[(Long, BigDecimal)]])
final case class SavingsAccountBalances(balances: Option[List[(Long, BigDecimal)]])
@@ -69,8 +70,9 @@ class AccountBalanceRetriever extends Actor with Aggregator {
}
//#initial-expect
- class AccountAggregator(originalSender: ActorRef,
- id: Long, types: Set[AccountType]) {
+ class AccountAggregator(
+ originalSender: ActorRef,
+ id: Long, types: Set[AccountType]) {
val results =
mutable.ArrayBuffer.empty[(AccountType, Option[List[(Long, BigDecimal)]])]
@@ -95,7 +97,7 @@ class AccountBalanceRetriever extends Actor with Aggregator {
context.actorOf(Props[CheckingAccountProxy]) ! GetAccountBalances(id)
expectOnce {
case CheckingAccountBalances(balances) ⇒
- results += (Checking -> balances)
+ results += (Checking → balances)
collectBalances()
}
}
@@ -105,7 +107,7 @@ class AccountBalanceRetriever extends Actor with Aggregator {
context.actorOf(Props[SavingsAccountProxy]) ! GetAccountBalances(id)
expectOnce {
case SavingsAccountBalances(balances) ⇒
- results += (Savings -> balances)
+ results += (Savings → balances)
collectBalances()
}
}
@@ -114,7 +116,7 @@ class AccountBalanceRetriever extends Actor with Aggregator {
context.actorOf(Props[MoneyMarketAccountProxy]) ! GetAccountBalances(id)
expectOnce {
case MoneyMarketAccountBalances(balances) ⇒
- results += (MoneyMarket -> balances)
+ results += (MoneyMarket → balances)
collectBalances()
}
}
diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala
index 6035755862..77b2a2ff4c 100644
--- a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala
@@ -409,10 +409,10 @@ object MixinSample extends App {
//#mixin-model
val texts = Map(
- "that.rug_EN" -> "That rug really tied the room together.",
- "your.opinion_EN" -> "Yeah, well, you know, that's just, like, your opinion, man.",
- "that.rug_ES" -> "Esa alfombra realmente completaba la sala.",
- "your.opinion_ES" -> "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.")
+ "that.rug_EN" → "That rug really tied the room together.",
+ "your.opinion_EN" → "Yeah, well, you know, that's just, like, your opinion, man.",
+ "that.rug_ES" → "Esa alfombra realmente completaba la sala.",
+ "your.opinion_ES" → "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.")
case class I18nText(locale: String, key: String)
case class Message(author: Option[String], text: Any)
diff --git a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala
index 6b6f7a0800..2242f26462 100644
--- a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala
+++ b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala
@@ -46,7 +46,8 @@ class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSp
//#demo-code
val printer = system.actorOf(Props[PrintActor])
// The throttler for this example, setting the rate
- val throttler = system.actorOf(Props(classOf[TimerBasedThrottler],
+ val throttler = system.actorOf(Props(
+ classOf[TimerBasedThrottler],
3 msgsPer 1.second))
// Set the target
throttler ! SetTarget(Some(printer))
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
index 2b135780ab..df8d3460e0 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala
@@ -83,8 +83,8 @@ final class GCounter private[akka] (
else state.get(key) match {
case Some(v) ⇒
val tot = v + delta
- assignAncestor(new GCounter(state + (key -> tot)))
- case None ⇒ assignAncestor(new GCounter(state + (key -> delta)))
+ assignAncestor(new GCounter(state + (key → tot)))
+ case None ⇒ assignAncestor(new GCounter(state + (key → delta)))
}
}
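
Note: for context on the hunk above, a G-counter keeps one monotonically growing slot per node: `+` bumps the local slot, merge takes the per-node maximum, and the value is the sum over slots. A standalone sketch of those semantics (plain strings standing in for `UniqueAddress`; not the Akka class):

    object GCounterSketch {
      type Node = String
      type State = Map[Node, BigInt]

      // Bump this node's slot; slots only ever grow.
      def increment(s: State, node: Node, delta: BigInt): State =
        s + (node → (s.getOrElse(node, BigInt(0)) + delta))

      // Merge is pointwise max, so merges commute and can be repeated safely.
      def merge(a: State, b: State): State =
        (a.keySet ++ b.keySet).map(n ⇒ n → (a.getOrElse(n, BigInt(0)) max b.getOrElse(n, BigInt(0)))).toMap

      // The counter value is the sum over all slots.
      def value(s: State): BigInt = s.values.sum
    }
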
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala
index 6ca4c8824d..86ee1f5c11 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala
@@ -47,7 +47,7 @@ final class LWWMap[A] private[akka] (
/**
* Scala API: All entries of the map.
*/
- def entries: Map[String, A] = underlying.entries.map { case (k, r) ⇒ k -> r.value }
+ def entries: Map[String, A] = underlying.entries.map { case (k, r) ⇒ k → r.value }
/**
* Java API: All entries of the map.
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala
index a4bf881ea1..77eca287f5 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala
@@ -93,8 +93,8 @@ object LWWRegister {
@SerialVersionUID(1L)
final class LWWRegister[A] private[akka] (
private[akka] val node: UniqueAddress,
- val value: A,
- val timestamp: Long)
+ val value: A,
+ val timestamp: Long)
extends ReplicatedData with ReplicatedDataSerialization {
import LWWRegister.{ Clock, defaultClock }
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
index 66bc02d31c..2280ff2456 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala
@@ -33,7 +33,7 @@ object ORMap {
*/
@SerialVersionUID(1L)
final class ORMap[A <: ReplicatedData] private[akka] (
- private[akka] val keys: ORSet[String],
+ private[akka] val keys: ORSet[String],
private[akka] val values: Map[String, A])
extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning {
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
index 8f49a63dfb..a74240217d 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala
@@ -52,7 +52,7 @@ final class ORMultiMap[A] private[akka] (private[akka] val underlying: ORMap[ORS
* Scala API: All entries of a multimap where keys are strings and values are sets.
*/
def entries: Map[String, Set[A]] =
- underlying.entries.map { case (k, v) ⇒ k -> v.elements }
+ underlying.entries.map { case (k, v) ⇒ k → v.elements }
/**
* Java API: All entries of a multimap where keys are strings and values are sets.
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
index 89fe4edcf5..6ed208ece9 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala
@@ -201,7 +201,7 @@ object ORSet {
@SerialVersionUID(1L)
final class ORSet[A] private[akka] (
private[akka] val elementsMap: Map[A, ORSet.Dot],
- private[akka] val vvector: VersionVector)
+ private[akka] val vvector: VersionVector)
extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning with FastMerge {
type T = ORSet[A]
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
index e53c501443..5cf0d440b9 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala
@@ -90,18 +90,21 @@ final class PNCounter private[akka] (
else this
override def merge(that: PNCounter): PNCounter =
- copy(increments = that.increments.merge(this.increments),
+ copy(
+ increments = that.increments.merge(this.increments),
decrements = that.decrements.merge(this.decrements))
override def needPruningFrom(removedNode: UniqueAddress): Boolean =
increments.needPruningFrom(removedNode) || decrements.needPruningFrom(removedNode)
override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): PNCounter =
- copy(increments = increments.prune(removedNode, collapseInto),
+ copy(
+ increments = increments.prune(removedNode, collapseInto),
decrements = decrements.prune(removedNode, collapseInto))
override def pruningCleanup(removedNode: UniqueAddress): PNCounter =
- copy(increments = increments.pruningCleanup(removedNode),
+ copy(
+ increments = increments.pruningCleanup(removedNode),
decrements = decrements.pruningCleanup(removedNode))
private def copy(increments: GCounter = this.increments, decrements: GCounter = this.decrements): PNCounter =
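
Note: all three `copy` calls above apply the same rule: a PN-counter is a pair of G-counters (increments and decrements) that merge, prune, and clean up independently, and the externally visible value is their difference. Continuing the G-counter sketch from earlier:

    import GCounterSketch._

    // A PN-counter is two G-counter states; each operation applies component-wise.
    def pnValue(inc: State, dec: State): BigInt = value(inc) - value(dec)
    def pnMerge(a: (State, State), b: (State, State)): (State, State) =
      (merge(a._1, b._1), merge(a._2, b._2))
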
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala
index 999ce0c659..fe90897ad7 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala
@@ -34,12 +34,12 @@ final class PNCounterMap private[akka] (
type T = PNCounterMap
/** Scala API */
- def entries: Map[String, BigInt] = underlying.entries.map { case (k, c) ⇒ k -> c.value }
+ def entries: Map[String, BigInt] = underlying.entries.map { case (k, c) ⇒ k → c.value }
/** Java API */
def getEntries: java.util.Map[String, BigInteger] = {
import scala.collection.JavaConverters._
- underlying.entries.map { case (k, c) ⇒ k -> c.value.bigInteger }.asJava
+ underlying.entries.map { case (k, c) ⇒ k → c.value.bigInteger }.asJava
}
/**
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
index 8bc67711db..5a067d0090 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala
@@ -93,13 +93,13 @@ object ReplicatorSettings {
* be configured to worst case in a healthy cluster.
*/
final class ReplicatorSettings(
- val role: Option[String],
- val gossipInterval: FiniteDuration,
+ val role: Option[String],
+ val gossipInterval: FiniteDuration,
val notifySubscribersInterval: FiniteDuration,
- val maxDeltaElements: Int,
- val dispatcher: String,
- val pruningInterval: FiniteDuration,
- val maxPruningDissemination: FiniteDuration) {
+ val maxDeltaElements: Int,
+ val dispatcher: String,
+ val pruningInterval: FiniteDuration,
+ val maxPruningDissemination: FiniteDuration) {
def withRole(role: String): ReplicatorSettings = copy(role = ReplicatorSettings.roleOption(role))
@@ -126,13 +126,13 @@ final class ReplicatorSettings(
copy(pruningInterval = pruningInterval, maxPruningDissemination = maxPruningDissemination)
private def copy(
- role: Option[String] = role,
- gossipInterval: FiniteDuration = gossipInterval,
+ role: Option[String] = role,
+ gossipInterval: FiniteDuration = gossipInterval,
notifySubscribersInterval: FiniteDuration = notifySubscribersInterval,
- maxDeltaElements: Int = maxDeltaElements,
- dispatcher: String = dispatcher,
- pruningInterval: FiniteDuration = pruningInterval,
- maxPruningDissemination: FiniteDuration = maxPruningDissemination): ReplicatorSettings =
+ maxDeltaElements: Int = maxDeltaElements,
+ dispatcher: String = dispatcher,
+ pruningInterval: FiniteDuration = pruningInterval,
+ maxPruningDissemination: FiniteDuration = maxPruningDissemination): ReplicatorSettings =
new ReplicatorSettings(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher,
pruningInterval, maxPruningDissemination)
}
@@ -471,7 +471,7 @@ object Replicator {
val NotFoundDigest: Digest = ByteString(-1)
final case class DataEnvelope(
- data: ReplicatedData,
+ data: ReplicatedData,
pruning: Map[UniqueAddress, PruningState] = Map.empty)
extends ReplicatorMessage {
@@ -735,7 +735,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val selfUniqueAddress = cluster.selfUniqueAddress
require(!cluster.isTerminated, "Cluster node must not be terminated")
- require(role.forall(cluster.selfRoles.contains),
+ require(
+ role.forall(cluster.selfRoles.contains),
s"This cluster member [${selfAddress}] doesn't have the role [$role]")
//Start periodic gossip to random nodes in cluster
@@ -899,7 +900,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val merged = envelope.merge(pruningCleanupTombstoned(writeEnvelope)).addSeen(selfAddress)
setData(key, merged)
} else {
- log.warning("Wrong type for writing [{}], existing type [{}], got [{}]",
+ log.warning(
+ "Wrong type for writing [{}], existing type [{}], got [{}]",
key, existing.getClass.getName, writeEnvelope.data.getClass.getName)
}
case None ⇒
@@ -1048,14 +1050,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
if (keys.nonEmpty) {
if (log.isDebugEnabled)
log.debug("Sending gossip to [{}], containing [{}]", sender().path.address, keys.mkString(", "))
- val g = Gossip(keys.map(k ⇒ k -> getData(k).get)(collection.breakOut), sendBack = otherDifferentKeys.nonEmpty)
+ val g = Gossip(keys.map(k ⇒ k → getData(k).get)(collection.breakOut), sendBack = otherDifferentKeys.nonEmpty)
sender() ! g
}
val myMissingKeys = otherKeys diff myKeys
if (myMissingKeys.nonEmpty) {
if (log.isDebugEnabled)
log.debug("Sending gossip status to [{}], requesting missing [{}]", sender().path.address, myMissingKeys.mkString(", "))
- val status = Status(myMissingKeys.map(k ⇒ k -> NotFoundDigest)(collection.breakOut), chunk, totChunks)
+ val status = Status(myMissingKeys.map(k ⇒ k → NotFoundDigest)(collection.breakOut), chunk, totChunks)
sender() ! status
}
}
@@ -1305,12 +1307,12 @@ private[akka] abstract class ReadWriteAggregator extends Actor {
*/
private[akka] object WriteAggregator {
def props(
- key: KeyR,
- envelope: Replicator.Internal.DataEnvelope,
+ key: KeyR,
+ envelope: Replicator.Internal.DataEnvelope,
consistency: Replicator.WriteConsistency,
- req: Option[Any],
- nodes: Set[Address],
- replyTo: ActorRef): Props =
+ req: Option[Any],
+ nodes: Set[Address],
+ replyTo: ActorRef): Props =
Props(new WriteAggregator(key, envelope, consistency, req, nodes, replyTo))
.withDeploy(Deploy.local)
}
@@ -1319,12 +1321,12 @@ private[akka] object WriteAggregator {
* INTERNAL API
*/
private[akka] class WriteAggregator(
- key: KeyR,
- envelope: Replicator.Internal.DataEnvelope,
- consistency: Replicator.WriteConsistency,
- req: Option[Any],
+ key: KeyR,
+ envelope: Replicator.Internal.DataEnvelope,
+ consistency: Replicator.WriteConsistency,
+ req: Option[Any],
override val nodes: Set[Address],
- replyTo: ActorRef) extends ReadWriteAggregator {
+ replyTo: ActorRef) extends ReadWriteAggregator {
import Replicator._
import Replicator.Internal._
@@ -1384,12 +1386,12 @@ private[akka] class WriteAggregator(
*/
private[akka] object ReadAggregator {
def props(
- key: KeyR,
+ key: KeyR,
consistency: Replicator.ReadConsistency,
- req: Option[Any],
- nodes: Set[Address],
- localValue: Option[Replicator.Internal.DataEnvelope],
- replyTo: ActorRef): Props =
+ req: Option[Any],
+ nodes: Set[Address],
+ localValue: Option[Replicator.Internal.DataEnvelope],
+ replyTo: ActorRef): Props =
Props(new ReadAggregator(key, consistency, req, nodes, localValue, replyTo))
.withDeploy(Deploy.local)
@@ -1399,12 +1401,12 @@ private[akka] object ReadAggregator {
* INTERNAL API
*/
private[akka] class ReadAggregator(
- key: KeyR,
- consistency: Replicator.ReadConsistency,
- req: Option[Any],
+ key: KeyR,
+ consistency: Replicator.ReadConsistency,
+ req: Option[Any],
override val nodes: Set[Address],
- localValue: Option[Replicator.Internal.DataEnvelope],
- replyTo: ActorRef) extends ReadWriteAggregator {
+ localValue: Option[Replicator.Internal.DataEnvelope],
+ replyTo: ActorRef) extends ReadWriteAggregator {
import Replicator._
import Replicator.Internal._
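
Note: these aggregators implement the consistency levels behind `Update` and `Get`; the client-side shape is the one the specs further down use verbatim. A minimal sketch, assuming a cluster-enabled config (mirroring LocalConcurrencySpec below) and the `DistributedData` extension to obtain the replicator ActorRef:

    import akka.actor.ActorSystem
    import akka.cluster.Cluster
    import akka.cluster.ddata.{ DistributedData, GCounter, GCounterKey }
    import akka.cluster.ddata.Replicator._
    import com.typesafe.config.ConfigFactory

    val system = ActorSystem("sketch", ConfigFactory.parseString("""
      akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
      akka.remote.netty.tcp.port = 0
      """))
    implicit val cluster = Cluster(system) // Update's modify function needs it in scope
    val replicator = DistributedData(system).replicator

    val KeyA = GCounterKey("counterA")
    replicator ! Update(KeyA, GCounter(), WriteLocal)(_ + 1) // sender gets UpdateSuccess(KeyA, None)
    replicator ! Get(KeyA, ReadLocal)                        // sender gets GetSuccess or NotFound
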
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
index 99ee99b68f..e0a150ca11 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala
@@ -262,7 +262,7 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L
private[akka] override def increment(n: UniqueAddress): VersionVector = {
val v = Timestamp.counter.getAndIncrement()
if (n == node) copy(version = v)
- else ManyVersionVector(TreeMap(node -> version, n -> v))
+ else ManyVersionVector(TreeMap(node → version, n → v))
}
/** INTERNAL API */
@@ -282,7 +282,7 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L
that match {
case OneVersionVector(n2, v2) ⇒
if (node == n2) if (version >= v2) this else OneVersionVector(n2, v2)
- else ManyVersionVector(TreeMap(node -> version, n2 -> v2))
+ else ManyVersionVector(TreeMap(node → version, n2 → v2))
case ManyVersionVector(vs2) ⇒
val v2 = vs2.getOrElse(node, Timestamp.Zero)
val mergedVersions =
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
index 41f9d31557..eaf2b61692 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala
@@ -52,29 +52,29 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
private val VersionVectorManifest = "L"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
- GSetManifest -> gsetFromBinary,
- ORSetManifest -> orsetFromBinary,
- FlagManifest -> flagFromBinary,
- LWWRegisterManifest -> lwwRegisterFromBinary,
- GCounterManifest -> gcounterFromBinary,
- PNCounterManifest -> pncounterFromBinary,
- ORMapManifest -> ormapFromBinary,
- LWWMapManifest -> lwwmapFromBinary,
- PNCounterMapManifest -> pncountermapFromBinary,
- ORMultiMapManifest -> multimapFromBinary,
- DeletedDataManifest -> (_ ⇒ DeletedData),
- VersionVectorManifest -> versionVectorFromBinary,
+ GSetManifest → gsetFromBinary,
+ ORSetManifest → orsetFromBinary,
+ FlagManifest → flagFromBinary,
+ LWWRegisterManifest → lwwRegisterFromBinary,
+ GCounterManifest → gcounterFromBinary,
+ PNCounterManifest → pncounterFromBinary,
+ ORMapManifest → ormapFromBinary,
+ LWWMapManifest → lwwmapFromBinary,
+ PNCounterMapManifest → pncountermapFromBinary,
+ ORMultiMapManifest → multimapFromBinary,
+ DeletedDataManifest → (_ ⇒ DeletedData),
+ VersionVectorManifest → versionVectorFromBinary,
- GSetKeyManifest -> (bytes ⇒ GSetKey(keyIdFromBinary(bytes))),
- ORSetKeyManifest -> (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))),
- FlagKeyManifest -> (bytes ⇒ FlagKey(keyIdFromBinary(bytes))),
- LWWRegisterKeyManifest -> (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))),
- GCounterKeyManifest -> (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))),
- PNCounterKeyManifest -> (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))),
- ORMapKeyManifest -> (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))),
- LWWMapKeyManifest -> (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))),
- PNCounterMapKeyManifest -> (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes))),
- ORMultiMapKeyManifest -> (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes))))
+ GSetKeyManifest → (bytes ⇒ GSetKey(keyIdFromBinary(bytes))),
+ ORSetKeyManifest → (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))),
+ FlagKeyManifest → (bytes ⇒ FlagKey(keyIdFromBinary(bytes))),
+ LWWRegisterKeyManifest → (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))),
+ GCounterKeyManifest → (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))),
+ PNCounterKeyManifest → (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))),
+ ORMapKeyManifest → (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))),
+ LWWMapKeyManifest → (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))),
+ PNCounterMapKeyManifest → (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes))),
+ ORMultiMapKeyManifest → (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes))))
override def manifest(obj: AnyRef): String = obj match {
case _: ORSet[_] ⇒ ORSetManifest
@@ -284,7 +284,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def gcounterFromProto(gcounter: rd.GCounter): GCounter = {
new GCounter(state = gcounter.getEntriesList.asScala.map(entry ⇒
- uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray))(breakOut))
+ uniqueAddressFromProto(entry.getNode) → BigInt(entry.getValue.toByteArray))(breakOut))
}
def pncounterToProto(pncounter: PNCounter): rd.PNCounter =
@@ -322,7 +322,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
VersionVector(uniqueAddressFromProto(entries.get(0).getNode), entries.get(0).getVersion)
else {
val versions: TreeMap[UniqueAddress, Long] = versionVector.getEntriesList.asScala.map(entry ⇒
- uniqueAddressFromProto(entry.getNode) -> entry.getVersion)(breakOut)
+ uniqueAddressFromProto(entry.getNode) → entry.getVersion)(breakOut)
VersionVector(versions)
}
}
@@ -341,7 +341,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def ormapFromProto(ormap: rd.ORMap): ORMap[ReplicatedData] = {
val entries = ormap.getEntriesList.asScala.map(entry ⇒
- entry.getKey -> otherMessageFromProto(entry.getValue).asInstanceOf[ReplicatedData]).toMap
+ entry.getKey → otherMessageFromProto(entry.getValue).asInstanceOf[ReplicatedData]).toMap
new ORMap(
keys = orsetFromProto(ormap.getKeys).asInstanceOf[ORSet[String]],
entries)
@@ -361,7 +361,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def lwwmapFromProto(lwwmap: rd.LWWMap): LWWMap[Any] = {
val entries = lwwmap.getEntriesList.asScala.map(entry ⇒
- entry.getKey -> lwwRegisterFromProto(entry.getValue)).toMap
+ entry.getKey → lwwRegisterFromProto(entry.getValue)).toMap
new LWWMap(new ORMap(
keys = orsetFromProto(lwwmap.getKeys).asInstanceOf[ORSet[String]],
entries))
@@ -381,7 +381,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def pncountermapFromProto(pncountermap: rd.PNCounterMap): PNCounterMap = {
val entries = pncountermap.getEntriesList.asScala.map(entry ⇒
- entry.getKey -> pncounterFromProto(entry.getValue)).toMap
+ entry.getKey → pncounterFromProto(entry.getValue)).toMap
new PNCounterMap(new ORMap(
keys = orsetFromProto(pncountermap.getKeys).asInstanceOf[ORSet[String]],
entries))
@@ -401,7 +401,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def multimapFromProto(multimap: rd.ORMultiMap): ORMultiMap[Any] = {
val entries = multimap.getEntriesList.asScala.map(entry ⇒
- entry.getKey -> orsetFromProto(entry.getValue)).toMap
+ entry.getKey → orsetFromProto(entry.getValue)).toMap
new ORMultiMap(new ORMap(
keys = orsetFromProto(multimap.getKeys).asInstanceOf[ORSet[String]],
entries))
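
Note: the `fromBinaryMap` rewritten above is the usual manifest-dispatch pattern for `SerializerWithStringManifest`: `manifest` picks a short stable string per type, and `fromBinary` looks the deserializer up by that string. A self-contained sketch with hypothetical payload types, not this patch's serializer:

    import akka.serialization.SerializerWithStringManifest

    final case class Ping(n: Int)
    final case class Pong(n: Int)

    class PingPongSerializer extends SerializerWithStringManifest {
      private val PingManifest = "a"
      private val PongManifest = "b"

      // Manifest string → deserializer, the same shape as fromBinaryMap above.
      private val fromBinaryMap = Map[String, Array[Byte] ⇒ AnyRef](
        PingManifest → (bytes ⇒ Ping(new String(bytes, "UTF-8").toInt)),
        PongManifest → (bytes ⇒ Pong(new String(bytes, "UTF-8").toInt)))

      override def identifier: Int = 99999
      override def manifest(o: AnyRef): String = o match {
        case _: Ping ⇒ PingManifest
        case _: Pong ⇒ PongManifest
      }
      override def toBinary(o: AnyRef): Array[Byte] = o match {
        case Ping(n) ⇒ n.toString.getBytes("UTF-8")
        case Pong(n) ⇒ n.toString.getBytes("UTF-8")
      }
      override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
        fromBinaryMap.get(manifest) match {
          case Some(f) ⇒ f(bytes)
          case None    ⇒ throw new IllegalArgumentException(s"Unknown manifest [$manifest]")
        }
    }
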
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
index 42f46ec7c7..4df002af7b 100644
--- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
+++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala
@@ -169,20 +169,20 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
val GossipManifest = "N"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
- GetManifest -> getFromBinary,
- GetSuccessManifest -> getSuccessFromBinary,
- NotFoundManifest -> notFoundFromBinary,
- GetFailureManifest -> getFailureFromBinary,
- SubscribeManifest -> subscribeFromBinary,
- UnsubscribeManifest -> unsubscribeFromBinary,
- ChangedManifest -> changedFromBinary,
- DataEnvelopeManifest -> dataEnvelopeFromBinary,
- WriteManifest -> writeFromBinary,
- WriteAckManifest -> (_ ⇒ WriteAck),
- ReadManifest -> readFromBinary,
- ReadResultManifest -> readResultFromBinary,
- StatusManifest -> statusFromBinary,
- GossipManifest -> gossipFromBinary)
+ GetManifest → getFromBinary,
+ GetSuccessManifest → getSuccessFromBinary,
+ NotFoundManifest → notFoundFromBinary,
+ GetFailureManifest → getFailureFromBinary,
+ SubscribeManifest → subscribeFromBinary,
+ UnsubscribeManifest → unsubscribeFromBinary,
+ ChangedManifest → changedFromBinary,
+ DataEnvelopeManifest → dataEnvelopeFromBinary,
+ WriteManifest → writeFromBinary,
+ WriteAckManifest → (_ ⇒ WriteAck),
+ ReadManifest → readFromBinary,
+ ReadResultManifest → readResultFromBinary,
+ StatusManifest → statusFromBinary,
+ GossipManifest → gossipFromBinary)
override def manifest(obj: AnyRef): String = obj match {
case _: DataEnvelope ⇒ DataEnvelopeManifest
@@ -243,8 +243,9 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def statusFromBinary(bytes: Array[Byte]): Status = {
val status = dm.Status.parseFrom(bytes)
- Status(status.getEntriesList.asScala.map(e ⇒
- e.getKey -> AkkaByteString(e.getDigest.toByteArray()))(breakOut),
+ Status(
+ status.getEntriesList.asScala.map(e ⇒
+ e.getKey → AkkaByteString(e.getDigest.toByteArray()))(breakOut),
status.getChunk, status.getTotChunks)
}
@@ -261,8 +262,9 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def gossipFromBinary(bytes: Array[Byte]): Gossip = {
val gossip = dm.Gossip.parseFrom(decompress(bytes))
- Gossip(gossip.getEntriesList.asScala.map(e ⇒
- e.getKey -> dataEnvelopeFromProto(e.getEnvelope))(breakOut),
+ Gossip(
+ gossip.getEntriesList.asScala.map(e ⇒
+ e.getKey → dataEnvelopeFromProto(e.getEnvelope))(breakOut),
sendBack = gossip.getSendBack)
}
@@ -408,7 +410,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
else PruningState.PruningInitialized(pruningEntry.getSeenList.asScala.map(addressFromProto)(breakOut))
val state = PruningState(uniqueAddressFromProto(pruningEntry.getOwnerAddress), phase)
val removed = uniqueAddressFromProto(pruningEntry.getRemovedAddress)
- removed -> state
+ removed → state
}(breakOut)
val data = otherMessageFromProto(dataEnvelope.getData).asInstanceOf[ReplicatedData]
DataEnvelope(data, pruning)
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala
index 50d02ead79..b7a70f86b4 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala
@@ -59,7 +59,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w
// val totalCount = 2000
val expectedData = (0 until totalCount).toSet
val data: Map[RoleName, Seq[Int]] = {
- val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i -> n }.toMap
+ val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i → n }.toMap
(0 until totalCount).groupBy(i ⇒ nodeIndex(i % nodeCount))
}
lazy val myData: Seq[Int] = data(myself)
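
Note: the `data` map above deals the indices out round-robin, index `i` going to node `i % nodeCount`; concretely:

    // With 3 nodes and 7 values, groupBy(i ⇒ nodeIndex(i % 3)) yields:
    val nodes = Vector("n1", "n2", "n3")
    val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i → n }.toMap
    val data = (0 until 7).groupBy(i ⇒ nodeIndex(i % nodes.size))
    // data("n1") == Vector(0, 3, 6); data("n2") == Vector(1, 4); data("n3") == Vector(2, 5)
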
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala
index 59ab03b85e..7fe8e31c73 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala
@@ -115,7 +115,8 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult
replicator ! Update(KeyA, GCounter(), WriteLocal)(_ + 20)
replicator ! Update(KeyB, PNCounter(), WriteTo(2, timeout))(_ + 20)
replicator ! Update(KeyC, GCounter(), WriteAll(timeout))(_ + 20)
- receiveN(3).toSet should be(Set(UpdateSuccess(KeyA, None),
+ receiveN(3).toSet should be(Set(
+ UpdateSuccess(KeyA, None),
UpdateSuccess(KeyB, None), UpdateSuccess(KeyC, None)))
replicator ! Update(KeyE, GSet(), WriteLocal)(_ + "e1" + "e2")
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala
index 6954199ace..b24b185779 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala
@@ -146,7 +146,7 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST
replicator ! Get(KeyC, ReadLocal)
expectMsgPF() {
case g @ GetSuccess(KeyC, _) ⇒
- g.get(KeyC).entries should be(Map("x" -> 3L, "y" -> 3L))
+ g.get(KeyC).entries should be(Map("x" → 3L, "y" → 3L))
g.get(KeyC).needPruningFrom(thirdUniqueAddress) should be(false)
}
}
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
index f37b7ee24e..2c72c3cf0c 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
@@ -526,22 +526,22 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec
runOn(second) {
replicator ! Subscribe(KeyH, changedProbe.ref)
- replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" -> Flag(enabled = false)))
- changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" -> Flag(enabled = false)))
+ replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" → Flag(enabled = false)))
+ changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" → Flag(enabled = false)))
}
enterBarrier("update-h1")
runOn(first) {
- replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" -> Flag(enabled = true)))
+ replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" → Flag(enabled = true)))
}
runOn(second) {
- changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" -> Flag(enabled = true)))
+ changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" → Flag(enabled = true)))
- replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("b" -> Flag(enabled = true)))
+ replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("b" → Flag(enabled = true)))
changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(
- Map("a" -> Flag(enabled = true), "b" -> Flag(enabled = true)))
+ Map("a" → Flag(enabled = true), "b" → Flag(enabled = true)))
}
enterBarrierAfterTestStep()
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala
index 5ca79a37ef..d5e70cf3ef 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala
@@ -20,7 +20,7 @@ class LWWMapSpec extends WordSpec with Matchers {
"be able to set entries" in {
val m = LWWMap.empty[Int].put(node1, "a", 1, defaultClock[Int]).put(node2, "b", 2, defaultClock[Int])
- m.entries should be(Map("a" -> 1, "b" -> 2))
+ m.entries should be(Map("a" → 1, "b" → 2))
}
"be able to have its entries correctly merged with another LWWMap with other entries" in {
@@ -28,7 +28,7 @@ class LWWMapSpec extends WordSpec with Matchers {
val m2 = LWWMap.empty.put(node2, "c", 3, defaultClock[Int])
// merge both ways
- val expected = Map("a" -> 1, "b" -> 2, "c" -> 3)
+ val expected = Map("a" → 1, "b" → 2, "c" → 3)
(m1 merge m2).entries should be(expected)
(m2 merge m1).entries should be(expected)
}
@@ -40,11 +40,11 @@ class LWWMapSpec extends WordSpec with Matchers {
val merged1 = m1 merge m2
val m3 = merged1.remove(node1, "b")
- (merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 3))
+ (merged1 merge m3).entries should be(Map("a" → 1, "c" → 3))
// but if there is a conflicting update the entry is not removed
val m4 = merged1.put(node2, "b", 22, defaultClock[Int])
- (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3))
+ (m3 merge m4).entries should be(Map("a" → 1, "b" → 22, "c" → 3))
}
"have unapply extractor" in {
@@ -55,7 +55,7 @@ class LWWMapSpec extends WordSpec with Matchers {
case c @ Changed(LWWMapKey("key")) ⇒
val LWWMap(entries3) = c.dataValue
val entries4: Map[String, Long] = entries3
- entries4 should be(Map("a" -> 1L))
+ entries4 should be(Map("a" → 1L))
}
}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
index f2c2c22734..ccc2a2f7eb 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala
@@ -41,7 +41,8 @@ class LocalConcurrencySpec(_system: ActorSystem) extends TestKit(_system)
import LocalConcurrencySpec._
def this() {
- this(ActorSystem("LocalConcurrencySpec",
+ this(ActorSystem(
+ "LocalConcurrencySpec",
ConfigFactory.parseString("""
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.netty.tcp.port=0
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala
index 7b96a1c6ce..8fb29220a0 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala
@@ -197,7 +197,7 @@ class ORMapSpec extends WordSpec with Matchers {
case c @ Changed(ORMapKey("key")) ⇒
val ORMap(entries3) = c.dataValue
val entries4: Map[String, ReplicatedData] = entries3
- entries4 should be(Map("a" -> Flag(true), "b" -> Flag(false)))
+ entries4 should be(Map("a" → Flag(true), "b" → Flag(false)))
}
}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala
index 38b6e930a6..e0d027e0dc 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala
@@ -17,20 +17,20 @@ class ORMultiMapSpec extends WordSpec with Matchers {
"be able to add entries" in {
val m = ORMultiMap().addBinding(node1, "a", "A").addBinding(node1, "b", "B")
- m.entries should be(Map("a" -> Set("A"), "b" -> Set("B")))
+ m.entries should be(Map("a" → Set("A"), "b" → Set("B")))
val m2 = m.addBinding(node1, "a", "C")
- m2.entries should be(Map("a" -> Set("A", "C"), "b" -> Set("B")))
+ m2.entries should be(Map("a" → Set("A", "C"), "b" → Set("B")))
}
"be able to remove entry" in {
val m = ORMultiMap().addBinding(node1, "a", "A").addBinding(node1, "b", "B").removeBinding(node1, "a", "A")
- m.entries should be(Map("b" -> Set("B")))
+ m.entries should be(Map("b" → Set("B")))
}
"be able to replace an entry" in {
val m = ORMultiMap().addBinding(node1, "a", "A").replaceBinding(node1, "a", "A", "B")
- m.entries should be(Map("a" -> Set("B")))
+ m.entries should be(Map("a" → Set("B")))
}
"be able to have its entries correctly merged with another ORMultiMap with other entries" in {
@@ -40,9 +40,9 @@ class ORMultiMapSpec extends WordSpec with Matchers {
// merge both ways
val expectedMerge = Map(
- "a" -> Set("A"),
- "b" -> Set("B"),
- "c" -> Set("C"))
+ "a" → Set("A"),
+ "b" → Set("B"),
+ "c" → Set("C"))
val merged1 = m1 merge m2
merged1.entries should be(expectedMerge)
@@ -67,10 +67,10 @@ class ORMultiMapSpec extends WordSpec with Matchers {
// merge both ways
val expectedMerged = Map(
- "a" -> Set("A2"),
- "b" -> Set("B1"),
- "c" -> Set("C2"),
- "d" -> Set("D1", "D2"))
+ "a" → Set("A2"),
+ "b" → Set("B1"),
+ "c" → Set("C2"),
+ "d" → Set("D1", "D2"))
val merged1 = m1 merge m2
merged1.entries should be(expectedMerged)
@@ -89,8 +89,8 @@ class ORMultiMapSpec extends WordSpec with Matchers {
val m2 = m.put(node1, "a", a - "A1")
val expectedMerged = Map(
- "a" -> Set("A2"),
- "b" -> Set("B1"))
+ "a" → Set("A2"),
+ "b" → Set("B1"))
m2.entries should be(expectedMerged)
}
@@ -104,7 +104,7 @@ class ORMultiMapSpec extends WordSpec with Matchers {
"remove all bindings for a given key" in {
val m = ORMultiMap().addBinding(node1, "a", "A1").addBinding(node1, "a", "A2").addBinding(node1, "b", "B1")
val m2 = m.remove(node1, "a")
- m2.entries should be(Map("b" -> Set("B1")))
+ m2.entries should be(Map("b" → Set("B1")))
}
"have unapply extractor" in {
@@ -116,7 +116,7 @@ class ORMultiMapSpec extends WordSpec with Matchers {
case c @ Changed(ORMultiMapKey("key")) ⇒
val ORMultiMap(entries3) = c.dataValue
val entries4: Map[String, Set[Long]] = entries3
- entries4 should be(Map("a" -> Set(1L, 2L), "b" -> Set(3L)))
+ entries4 should be(Map("a" → Set(1L, 2L), "b" → Set(3L)))
}
}
}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
index 5d83500f6c..1e8188c07e 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
@@ -228,30 +228,30 @@ class ORSetSpec extends WordSpec with Matchers {
"ORSet unit test" must {
"verify subtractDots" in {
- val dot = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 2L, nodeD -> 14L, nodeG -> 22L))
- val vvector = VersionVector(TreeMap(nodeA -> 4L, nodeB -> 1L, nodeC -> 1L, nodeD -> 14L, nodeE -> 5L, nodeF -> 2L))
- val expected = VersionVector(TreeMap(nodeB -> 2L, nodeG -> 22L))
+ val dot = VersionVector(TreeMap(nodeA → 3L, nodeB → 2L, nodeD → 14L, nodeG → 22L))
+ val vvector = VersionVector(TreeMap(nodeA → 4L, nodeB → 1L, nodeC → 1L, nodeD → 14L, nodeE → 5L, nodeF → 2L))
+ val expected = VersionVector(TreeMap(nodeB → 2L, nodeG → 22L))
ORSet.subtractDots(dot, vvector) should be(expected)
}
"verify mergeCommonKeys" in {
val commonKeys: Set[String] = Set("K1", "K2")
- val thisDot1 = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L))
- val thisDot2 = VersionVector(TreeMap(nodeB -> 5L, nodeC -> 2L))
- val thisVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 5L, nodeC -> 2L, nodeD -> 7L))
+ val thisDot1 = VersionVector(TreeMap(nodeA → 3L, nodeD → 7L))
+ val thisDot2 = VersionVector(TreeMap(nodeB → 5L, nodeC → 2L))
+ val thisVvector = VersionVector(TreeMap(nodeA → 3L, nodeB → 5L, nodeC → 2L, nodeD → 7L))
val thisSet = new ORSet(
- elementsMap = Map("K1" -> thisDot1, "K2" -> thisDot2),
+ elementsMap = Map("K1" → thisDot1, "K2" → thisDot2),
vvector = thisVvector)
val thatDot1 = VersionVector(nodeA, 3L)
val thatDot2 = VersionVector(nodeB, 6L)
- val thatVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 6L, nodeC -> 1L, nodeD -> 8L))
+ val thatVvector = VersionVector(TreeMap(nodeA → 3L, nodeB → 6L, nodeC → 1L, nodeD → 8L))
val thatSet = new ORSet(
- elementsMap = Map("K1" -> thatDot1, "K2" -> thatDot2),
+ elementsMap = Map("K1" → thatDot1, "K2" → thatDot2),
vvector = thatVvector)
val expectedDots = Map(
- "K1" -> VersionVector(nodeA, 3L),
- "K2" -> VersionVector(TreeMap(nodeB -> 6L, nodeC -> 2L)))
+ "K1" → VersionVector(nodeA, 3L),
+ "K2" → VersionVector(TreeMap(nodeB → 6L, nodeC → 2L)))
ORSet.mergeCommonKeys(commonKeys, thisSet, thatSet) should be(expectedDots)
}
@@ -259,14 +259,14 @@ class ORSetSpec extends WordSpec with Matchers {
"verify mergeDisjointKeys" in {
val keys: Set[Any] = Set("K3", "K4", "K5")
val elements: Map[Any, VersionVector] = Map(
- "K3" -> VersionVector(nodeA, 4L),
- "K4" -> VersionVector(TreeMap(nodeA -> 3L, nodeD -> 8L)),
- "K5" -> VersionVector(nodeA, 2L))
- val vvector = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L))
- val acc: Map[Any, VersionVector] = Map("K1" -> VersionVector(nodeA, 3L))
+ "K3" → VersionVector(nodeA, 4L),
+ "K4" → VersionVector(TreeMap(nodeA → 3L, nodeD → 8L)),
+ "K5" → VersionVector(nodeA, 2L))
+ val vvector = VersionVector(TreeMap(nodeA → 3L, nodeD → 7L))
+ val acc: Map[Any, VersionVector] = Map("K1" → VersionVector(nodeA, 3L))
val expectedDots = acc ++ Map(
- "K3" -> VersionVector(nodeA, 4L),
- "K4" -> VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen
+ "K3" → VersionVector(nodeA, 4L),
+ "K4" → VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen
ORSet.mergeDisjointKeys(keys, elements, vvector, acc) should be(expectedDots)
}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala
index 3b621f120c..fc5234a342 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala
@@ -19,7 +19,7 @@ class PNCounterMapSpec extends WordSpec with Matchers {
"be able to increment and decrement entries" in {
val m = PNCounterMap().increment(node1, "a", 2).increment(node1, "b", 3).decrement(node2, "a", 1)
- m.entries should be(Map("a" -> 1, "b" -> 3))
+ m.entries should be(Map("a" → 1, "b" → 3))
}
"be able to have its entries correctly merged with another ORMap with other entries" in {
@@ -27,7 +27,7 @@ class PNCounterMapSpec extends WordSpec with Matchers {
val m2 = PNCounterMap().increment(node2, "c", 5)
// merge both ways
- val expected = Map("a" -> 1, "b" -> 3, "c" -> 7)
+ val expected = Map("a" → 1, "b" → 3, "c" → 7)
(m1 merge m2).entries should be(expected)
(m2 merge m1).entries should be(expected)
}
@@ -39,11 +39,11 @@ class PNCounterMapSpec extends WordSpec with Matchers {
val merged1 = m1 merge m2
val m3 = merged1.remove(node1, "b")
- (merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 7))
+ (merged1 merge m3).entries should be(Map("a" → 1, "c" → 7))
// but if there is a conflicting update the entry is not removed
val m4 = merged1.increment(node2, "b", 10)
- (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7))
+ (m3 merge m4).entries should be(Map("a" → 1, "b" → 13, "c" → 7))
}
"have unapply extractor" in {
@@ -54,7 +54,7 @@ class PNCounterMapSpec extends WordSpec with Matchers {
case c @ Changed(PNCounterMapKey("key")) ⇒
val PNCounterMap(entries3) = c.dataValue
val entries4: Map[String, BigInt] = entries3
- entries4 should be(Map("a" -> 1L, "b" -> 2L))
+ entries4 should be(Map("a" → 1L, "b" → 2L))
}
}
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala
index 98e0776563..5ce047ddcf 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala
@@ -68,7 +68,7 @@ class WriteAggregatorSpec extends AkkaSpec("""
val writeMajority = WriteMajority(timeout)
def probes(probe: ActorRef): Map[Address, ActorRef] =
- nodes.toSeq.map(_ -> system.actorOf(WriteAggregatorSpec.writeAckAdapterProps(probe))).toMap
+ nodes.toSeq.map(_ → system.actorOf(WriteAggregatorSpec.writeAckAdapterProps(probe))).toMap
"WriteAggregator" must {
"send to at least N/2+1 replicas when WriteMajority" in {
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
index 8f338f7aff..3512224da2 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
@@ -25,7 +25,8 @@ import akka.testkit.TestKit
import akka.cluster.UniqueAddress
import com.typesafe.config.ConfigFactory
-class ReplicatedDataSerializerSpec extends TestKit(ActorSystem("ReplicatedDataSerializerSpec",
+class ReplicatedDataSerializerSpec extends TestKit(ActorSystem(
+ "ReplicatedDataSerializerSpec",
ConfigFactory.parseString("""
akka.actor.provider=akka.cluster.ClusterActorRefProvider
akka.remote.netty.tcp.port=0
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
index a622d6a756..7206a7d11f 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala
@@ -23,7 +23,8 @@ import akka.util.ByteString
import akka.cluster.UniqueAddress
import com.typesafe.config.ConfigFactory
-class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMessageSerializerSpec",
+class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem(
+ "ReplicatorMessageSerializerSpec",
ConfigFactory.parseString("""
akka.actor.provider=akka.cluster.ClusterActorRefProvider
akka.remote.netty.tcp.port=0
@@ -64,17 +65,19 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMes
checkSerialization(Changed(keyA)(data1))
checkSerialization(DataEnvelope(data1))
checkSerialization(DataEnvelope(data1, pruning = Map(
- address1 -> PruningState(address2, PruningPerformed),
- address3 -> PruningState(address2, PruningInitialized(Set(address1.address))))))
+ address1 → PruningState(address2, PruningPerformed),
+ address3 → PruningState(address2, PruningInitialized(Set(address1.address))))))
checkSerialization(Write("A", DataEnvelope(data1)))
checkSerialization(WriteAck)
checkSerialization(Read("A"))
checkSerialization(ReadResult(Some(DataEnvelope(data1))))
checkSerialization(ReadResult(None))
- checkSerialization(Status(Map("A" -> ByteString.fromString("a"),
- "B" -> ByteString.fromString("b")), chunk = 3, totChunks = 10))
- checkSerialization(Gossip(Map("A" -> DataEnvelope(data1),
- "B" -> DataEnvelope(GSet() + "b" + "c")), sendBack = true))
+ checkSerialization(Status(Map(
+ "A" → ByteString.fromString("a"),
+ "B" → ByteString.fromString("b")), chunk = 3, totChunks = 10))
+ checkSerialization(Gossip(Map(
+ "A" → DataEnvelope(data1),
+ "B" → DataEnvelope(GSet() + "b" + "c")), sendBack = true))
}
}
@@ -141,7 +144,7 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMes
"handle Int wrap around" ignore { // ignored because it takes 20 seconds (but it works)
val cache = new SmallCache[Read, String](2, 5.seconds, _ ⇒ null)
val a = Read("a")
- val x = a -> "A"
+ val x = a → "A"
var n = 0
while (n <= Int.MaxValue - 3) {
cache.add(x)
diff --git a/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala
index 2e1d6d9d19..97d5fbc6e1 100644
--- a/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala
+++ b/akka-docs/rst/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala
@@ -22,7 +22,8 @@ class DangerousActor extends Actor with ActorLogging {
import context.dispatcher
val breaker =
- new CircuitBreaker(context.system.scheduler,
+ new CircuitBreaker(
+ context.system.scheduler,
maxFailures = 5,
callTimeout = 10.seconds,
resetTimeout = 1.minute).onOpen(notifyMeOnOpen())
diff --git a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala
index 8e180f1da0..4e3be19626 100644
--- a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala
+++ b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSample.scala
@@ -134,10 +134,11 @@ class CounterService extends Actor {
// Restart the storage child when StorageException is thrown.
// After 3 restarts within 5 seconds it will be stopped.
- override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 3,
+ override val supervisorStrategy = OneForOneStrategy(
+ maxNrOfRetries = 3,
withinTimeRange = 5 seconds) {
- case _: Storage.StorageException => Restart
- }
+ case _: Storage.StorageException => Restart
+ }
val key = self.path.name
var storage: Option[ActorRef] = None
diff --git a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala
index 09f2f6c6a9..f72ca4290a 100644
--- a/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/actor/FaultHandlingDocSpec.scala
@@ -102,7 +102,8 @@ object FaultHandlingDocSpec {
class FaultHandlingDocSpec(_system: ActorSystem) extends TestKit(_system)
with ImplicitSender with FlatSpecLike with Matchers with BeforeAndAfterAll {
- def this() = this(ActorSystem("FaultHandlingDocSpec",
+ def this() = this(ActorSystem(
+ "FaultHandlingDocSpec",
ConfigFactory.parseString("""
akka {
loggers = ["akka.testkit.TestEventListener"]
diff --git a/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala
index 1ddf165dbf..38c44ef534 100644
--- a/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/actor/SchedulerDocSpec.scala
@@ -53,7 +53,8 @@ class SchedulerDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
//This will schedule to send the Tick-message
//to the tickActor after 0ms repeating every 50ms
val cancellable =
- system.scheduler.schedule(0 milliseconds,
+ system.scheduler.schedule(
+ 0 milliseconds,
50 milliseconds,
tickActor,
Tick)
diff --git a/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala
index 3c6376af0d..e374cc5318 100644
--- a/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/actor/TypedActorDocSpec.scala
@@ -121,7 +121,8 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
//#typed-actor-create1
//#typed-actor-create2
val otherSquarer: Squarer =
- TypedActor(system).typedActorOf(TypedProps(classOf[Squarer],
+ TypedActor(system).typedActorOf(TypedProps(
+ classOf[Squarer],
new SquarerImpl("foo")), "name")
//#typed-actor-create2
diff --git a/akka-docs/rst/scala/code/docs/akka/typed/IntroSpec.scala b/akka-docs/rst/scala/code/docs/akka/typed/IntroSpec.scala
index c8d4bce10c..ec9a14ed9d 100644
--- a/akka-docs/rst/scala/code/docs/akka/typed/IntroSpec.scala
+++ b/akka-docs/rst/scala/code/docs/akka/typed/IntroSpec.scala
@@ -21,7 +21,7 @@ object IntroSpec {
final case class Greet(whom: String, replyTo: ActorRef[Greeted])
final case class Greeted(whom: String)
- val greeter = Static[Greet] { msg ⇒
+ val greeter = Static[Greet] { msg =>
println(s"Hello ${msg.whom}!")
msg.replyTo ! Greeted(msg.whom)
}
@@ -51,17 +51,17 @@ object IntroSpec {
//#chatroom-behavior
val behavior: Behavior[GetSession] =
- ContextAware[Command] { ctx ⇒
+ ContextAware[Command] { ctx =>
var sessions = List.empty[ActorRef[SessionEvent]]
Static {
- case GetSession(screenName, client) ⇒
+ case GetSession(screenName, client) =>
sessions ::= client
val wrapper = ctx.spawnAdapter {
- p: PostMessage ⇒ PostSessionMessage(screenName, p.message)
+ p: PostMessage => PostSessionMessage(screenName, p.message)
}
client ! SessionGranted(wrapper)
- case PostSessionMessage(screenName, message) ⇒
+ case PostSessionMessage(screenName, message) =>
val mp = MessagePosted(screenName, message)
sessions foreach (_ ! mp)
}
@@ -98,13 +98,13 @@ class IntroSpec extends TypedSpec {
val gabbler: Behavior[SessionEvent] =
Total {
- case SessionDenied(reason) ⇒
+ case SessionDenied(reason) =>
println(s"cannot start chat room session: $reason")
Stopped
- case SessionGranted(handle) ⇒
+ case SessionGranted(handle) =>
handle ! PostMessage("Hello World!")
Same
- case MessagePosted(screenName, message) ⇒
+ case MessagePosted(screenName, message) =>
println(s"message has been posted by '$screenName': $message")
Stopped
}
@@ -113,13 +113,13 @@ class IntroSpec extends TypedSpec {
//#chatroom-main
val main: Behavior[Unit] =
Full {
- case Sig(ctx, PreStart) ⇒
+ case Sig(ctx, PreStart) =>
val chatRoom = ctx.spawn(Props(ChatRoom.behavior), "chatroom")
val gabblerRef = ctx.spawn(Props(gabbler), "gabbler")
ctx.watch(gabblerRef)
chatRoom ! GetSession("ol’ Gabbler", gabblerRef)
Same
- case Sig(_, Terminated(ref)) ⇒
+ case Sig(_, Terminated(ref)) =>
Stopped
}
diff --git a/akka-docs/rst/scala/code/docs/camel/Introduction.scala b/akka-docs/rst/scala/code/docs/camel/Introduction.scala
index 68918ffcbd..ed5e6f7383 100644
--- a/akka-docs/rst/scala/code/docs/camel/Introduction.scala
+++ b/akka-docs/rst/scala/code/docs/camel/Introduction.scala
@@ -93,13 +93,15 @@ object Introduction {
val camel = CamelExtension(system)
val actorRef = system.actorOf(Props[MyEndpoint])
// get a future reference to the activation of the endpoint of the Consumer Actor
- val activationFuture = camel.activationFutureFor(actorRef)(timeout = 10 seconds,
+ val activationFuture = camel.activationFutureFor(actorRef)(
+ timeout = 10 seconds,
executor = system.dispatcher)
//#CamelActivation
//#CamelDeactivation
system.stop(actorRef)
// get a future reference to the deactivation of the endpoint of the Consumer Actor
- val deactivationFuture = camel.deactivationFutureFor(actorRef)(timeout = 10 seconds,
+ val deactivationFuture = camel.deactivationFutureFor(actorRef)(
+ timeout = 10 seconds,
executor = system.dispatcher)
//#CamelDeactivation
}
diff --git a/akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala b/akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala
index 8f331485b1..cd5658f4f1 100644
--- a/akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala
+++ b/akka-docs/rst/scala/code/docs/ddata/TwoPhaseSet.scala
@@ -8,7 +8,7 @@ import akka.cluster.ddata.GSet
//#twophaseset
case class TwoPhaseSet(
- adds: GSet[String] = GSet.empty,
+ adds: GSet[String] = GSet.empty,
removals: GSet[String] = GSet.empty)
extends ReplicatedData {
type T = TwoPhaseSet
diff --git a/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala
index e3366e6752..7d2b4ff062 100644
--- a/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala
+++ b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer.scala
@@ -22,8 +22,8 @@ class TwoPhaseSetSerializer(val system: ExtendedActorSystem)
override def identifier = 99999
override def toBinary(obj: AnyRef): Array[Byte] = obj match {
- case m: TwoPhaseSet ⇒ twoPhaseSetToProto(m).toByteArray
- case _ ⇒ throw new IllegalArgumentException(
+ case m: TwoPhaseSet => twoPhaseSetToProto(m).toByteArray
+ case _ => throw new IllegalArgumentException(
s"Can't serialize object of type ${obj.getClass}")
}
@@ -62,8 +62,8 @@ class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem)
extends TwoPhaseSetSerializer(system) {
//#compression
override def toBinary(obj: AnyRef): Array[Byte] = obj match {
- case m: TwoPhaseSet ⇒ compress(twoPhaseSetToProto(m))
- case _ ⇒ throw new IllegalArgumentException(
+ case m: TwoPhaseSet => compress(twoPhaseSetToProto(m))
+ case _ => throw new IllegalArgumentException(
s"Can't serialize object of type ${obj.getClass}")
}
diff --git a/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala
index 266782917c..d6cbd4c07e 100644
--- a/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala
+++ b/akka-docs/rst/scala/code/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala
@@ -22,8 +22,8 @@ class TwoPhaseSetSerializer2(val system: ExtendedActorSystem)
val replicatedDataSerializer = new ReplicatedDataSerializer(system)
override def toBinary(obj: AnyRef): Array[Byte] = obj match {
- case m: TwoPhaseSet ⇒ twoPhaseSetToProto(m).toByteArray
- case _ ⇒ throw new IllegalArgumentException(
+ case m: TwoPhaseSet => twoPhaseSetToProto(m).toByteArray
+ case _ => throw new IllegalArgumentException(
s"Can't serialize object of type ${obj.getClass}")
}
diff --git a/akka-docs/rst/scala/code/docs/dispatcher/MyUnboundedMailbox.scala b/akka-docs/rst/scala/code/docs/dispatcher/MyUnboundedMailbox.scala
index 33ff7bc59f..02c1acd77a 100644
--- a/akka-docs/rst/scala/code/docs/dispatcher/MyUnboundedMailbox.scala
+++ b/akka-docs/rst/scala/code/docs/dispatcher/MyUnboundedMailbox.scala
@@ -52,8 +52,9 @@ class MyUnboundedMailbox extends MailboxType
}
// The create method is called to create the MessageQueue
- final override def create(owner: Option[ActorRef],
- system: Option[ActorSystem]): MessageQueue =
+ final override def create(
+ owner: Option[ActorRef],
+ system: Option[ActorSystem]): MessageQueue =
new MyMessageQueue()
}
//#mailbox-implementation-example
diff --git a/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala
index 2e01e6cbb7..cb5605b6a9 100644
--- a/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/extension/SettingsExtensionDocSpec.scala
@@ -22,7 +22,8 @@ import akka.testkit.AkkaSpec
class SettingsImpl(config: Config) extends Extension {
val DbUri: String = config.getString("myapp.db.uri")
val CircuitBreakerTimeout: Duration =
- Duration(config.getMilliseconds("myapp.circuit-breaker.timeout"),
+ Duration(
+ config.getMilliseconds("myapp.circuit-breaker.timeout"),
TimeUnit.MILLISECONDS)
}
//#extension
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/HttpServerExampleSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/HttpServerExampleSpec.scala
index 369b1a2c2a..9514448a70 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/HttpServerExampleSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/HttpServerExampleSpec.scala
@@ -178,7 +178,8 @@ class HttpServerExampleSpec extends WordSpec with Matchers
val requestHandler: HttpRequest => HttpResponse = {
case HttpRequest(GET, Uri.Path("/"), _, _, _) =>
- HttpResponse(entity = HttpEntity(ContentTypes.`text/html(UTF-8)`,
+ HttpResponse(entity = HttpEntity(
+ ContentTypes.`text/html(UTF-8)`,
"<html><body>Hello world!</body></html>"))
case HttpRequest(GET, Uri.Path("/ping"), _, _, _) =>
@@ -218,7 +219,8 @@ class HttpServerExampleSpec extends WordSpec with Matchers
val requestHandler: HttpRequest => HttpResponse = {
case HttpRequest(GET, Uri.Path("/"), _, _, _) =>
- HttpResponse(entity = HttpEntity(ContentTypes.`text/html(UTF-8)`,
+ HttpResponse(entity = HttpEntity(
+ ContentTypes.`text/html(UTF-8)`,
"<html><body>Hello world!</body></html>"))
case HttpRequest(GET, Uri.Path("/ping"), _, _, _) =>
@@ -236,7 +238,7 @@ class HttpServerExampleSpec extends WordSpec with Matchers
StdIn.readLine() // let it run until user presses return
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
- .onComplete(_ ⇒ system.terminate()) // and shutdown when done
+ .onComplete(_ => system.terminate()) // and shutdown when done
}
}
@@ -278,7 +280,7 @@ class HttpServerExampleSpec extends WordSpec with Matchers
StdIn.readLine() // let it run until user presses return
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
- .onComplete(_ ⇒ system.terminate()) // and shutdown when done
+ .onComplete(_ => system.terminate()) // and shutdown when done
}
}
}
@@ -310,7 +312,7 @@ class HttpServerExampleSpec extends WordSpec with Matchers
StdIn.readLine() // let it run until user presses return
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
- .onComplete(_ ⇒ system.terminate()) // and shutdown when done
+ .onComplete(_ => system.terminate()) // and shutdown when done
}
}
}
@@ -466,7 +468,7 @@ class HttpServerExampleSpec extends WordSpec with Matchers
StdIn.readLine() // let it run until user presses return
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
- .onComplete(_ ⇒ system.terminate()) // and shutdown when done
+ .onComplete(_ => system.terminate()) // and shutdown when done
}
}
//#stream-random-numbers
@@ -533,7 +535,7 @@ class HttpServerExampleSpec extends WordSpec with Matchers
StdIn.readLine() // let it run until user presses return
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
- .onComplete(_ ⇒ system.terminate()) // and shutdown when done
+ .onComplete(_ => system.terminate()) // and shutdown when done
}
}
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/RejectionHandlerExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/RejectionHandlerExamplesSpec.scala
index 8ef948c529..587106986f 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/RejectionHandlerExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/RejectionHandlerExamplesSpec.scala
@@ -22,13 +22,13 @@ object MyRejectionHandler {
.handle { case MissingCookieRejection(cookieName) =>
complete(HttpResponse(BadRequest, entity = "No cookies, no service!!!"))
}
- .handle { case AuthorizationFailedRejection ⇒
+ .handle { case AuthorizationFailedRejection =>
complete((Forbidden, "You're out of your depth!"))
}
- .handle { case ValidationRejection(msg, _) ⇒
+ .handle { case ValidationRejection(msg, _) =>
complete((InternalServerError, "That wasn't valid! " + msg))
}
- .handleAll[MethodRejection] { methodRejections ⇒
+ .handleAll[MethodRejection] { methodRejections =>
val names = methodRejections.map(_.supported.name)
complete((MethodNotAllowed, s"Can't do that! Supported: ${names mkString " or "}!"))
}
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/WebSocketExampleSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/WebSocketExampleSpec.scala
index 4b5a276129..a1c4541ba0 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/WebSocketExampleSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/WebSocketExampleSpec.scala
@@ -34,7 +34,7 @@ class WebSocketExampleSpec extends WordSpec with Matchers {
// rather we simply stream it back as the tail of the response
// this means we might start sending the response even before the
// end of the incoming message has been received
- case tm: TextMessage ⇒ TextMessage(Source.single("Hello ") ++ tm.textStream) :: Nil
+ case tm: TextMessage => TextMessage(Source.single("Hello ") ++ tm.textStream) :: Nil
case bm: BinaryMessage =>
// ignore binary messages but drain content to avoid the stream being clogged
bm.dataStream.runWith(Sink.ignore)
@@ -43,13 +43,13 @@ class WebSocketExampleSpec extends WordSpec with Matchers {
//#websocket-handler
//#websocket-request-handling
- val requestHandler: HttpRequest ⇒ HttpResponse = {
- case req @ HttpRequest(GET, Uri.Path("/greeter"), _, _, _) ⇒
+ val requestHandler: HttpRequest => HttpResponse = {
+ case req @ HttpRequest(GET, Uri.Path("/greeter"), _, _, _) =>
req.header[UpgradeToWebSocket] match {
- case Some(upgrade) ⇒ upgrade.handleMessages(greeterWebSocketService)
- case None ⇒ HttpResponse(400, entity = "Not a valid websocket request!")
+ case Some(upgrade) => upgrade.handleMessages(greeterWebSocketService)
+ case None => HttpResponse(400, entity = "Not a valid websocket request!")
}
- case _: HttpRequest ⇒ HttpResponse(404, entity = "Unknown resource!")
+ case _: HttpRequest => HttpResponse(404, entity = "Unknown resource!")
}
//#websocket-request-handling
@@ -62,7 +62,7 @@ class WebSocketExampleSpec extends WordSpec with Matchers {
import system.dispatcher // for the future transformations
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
- .onComplete(_ ⇒ system.terminate()) // and shutdown when done
+ .onComplete(_ => system.terminate()) // and shutdown when done
}
"routing-example" in {
pending // compile-time only test
@@ -83,7 +83,7 @@ class WebSocketExampleSpec extends WordSpec with Matchers {
val greeterWebSocketService =
Flow[Message]
.collect {
- case tm: TextMessage ⇒ TextMessage(Source.single("Hello ") ++ tm.textStream)
+ case tm: TextMessage => TextMessage(Source.single("Hello ") ++ tm.textStream)
// ignore binary messages
}
@@ -104,6 +104,6 @@ class WebSocketExampleSpec extends WordSpec with Matchers {
import system.dispatcher // for the future transformations
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
- .onComplete(_ ⇒ system.terminate()) // and shutdown when done
+ .onComplete(_ => system.terminate()) // and shutdown when done
}
}
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
index 656387967c..64b292a3a5 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/BasicDirectivesExamplesSpec.scala
@@ -267,14 +267,14 @@ class BasicDirectivesExamplesSpec extends RoutingSpec {
private def nonSuccessToEmptyJsonEntity(response: HttpResponse): HttpResponse =
response.status match {
- case code if code.isSuccess ⇒ response
- case code ⇒
+ case code if code.isSuccess => response
+ case code =>
log.warning("Dropping response entity since response status code was: {}", code)
response.copy(entity = NullJsonEntity)
}
/** Wrapper for all of our JSON API routes */
- def apiRoute(innerRoutes: ⇒ Route): Route =
+ def apiRoute(innerRoutes: => Route): Route =
mapResponse(nonSuccessToEmptyJsonEntity)(innerRoutes)
}
//#
@@ -388,13 +388,12 @@ class BasicDirectivesExamplesSpec extends RoutingSpec {
"mapInnerRoute" in {
//#mapInnerRoute
val completeWithInnerException =
- mapInnerRoute { route =>
- ctx =>
- try {
- route(ctx)
- } catch {
- case NonFatal(e) => ctx.complete(s"Got ${e.getClass.getSimpleName} '${e.getMessage}'")
- }
+ mapInnerRoute { route => ctx =>
+ try {
+ route(ctx)
+ } catch {
+ case NonFatal(e) => ctx.complete(s"Got ${e.getClass.getSimpleName} '${e.getMessage}'")
+ }
}
val route =
@@ -801,4 +800,4 @@ class BasicDirectivesExamplesSpec extends RoutingSpec {
//#
}
-}
\ No newline at end of file
+}
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala
index 714273846b..695f23dcf8 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/HeaderDirectivesExamplesSpec.scala
@@ -147,7 +147,7 @@ class HeaderDirectivesExamplesSpec extends RoutingSpec with Inside {
}
"headerValueByType-0" in {
val route =
- headerValueByType[Origin]() { origin ⇒
+ headerValueByType[Origin]() { origin =>
complete(s"The first origin was ${origin.origins.head}")
}
@@ -161,14 +161,14 @@ class HeaderDirectivesExamplesSpec extends RoutingSpec with Inside {
// reject a request if no header of the given type is present
Get("abc") ~> route ~> check {
- inside(rejection) { case MissingHeaderRejection("Origin") ⇒ }
+ inside(rejection) { case MissingHeaderRejection("Origin") => }
}
}
"optionalHeaderValueByType-0" in {
val route =
optionalHeaderValueByType[Origin]() {
- case Some(origin) ⇒ complete(s"The first origin was ${origin.origins.head}")
- case None ⇒ complete("No Origin header found.")
+ case Some(origin) => complete(s"The first origin was ${origin.origins.head}")
+ case None => complete("No Origin header found.")
}
val originHeader = Origin(HttpOrigin("http://localhost:8080"))
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/MiscDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/MiscDirectivesExamplesSpec.scala
index ad75799097..dfa22e6eca 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/MiscDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/MiscDirectivesExamplesSpec.scala
@@ -67,13 +67,13 @@ class MiscDirectivesExamplesSpec extends RoutingSpec {
Language("de") withQValue 0.5f)
request ~> {
- selectPreferredLanguage("en", "en-US") { lang ⇒
+ selectPreferredLanguage("en", "en-US") { lang =>
complete(lang.toString)
}
} ~> check { responseAs[String] shouldEqual "en-US" }
request ~> {
- selectPreferredLanguage("de-DE", "hu") { lang ⇒
+ selectPreferredLanguage("de-DE", "hu") { lang =>
complete(lang.toString)
}
} ~> check { responseAs[String] shouldEqual "de-DE" }
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/SchemeDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/SchemeDirectivesExamplesSpec.scala
index 42cdf2b502..9d495a1b96 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/SchemeDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/SchemeDirectivesExamplesSpec.scala
@@ -25,7 +25,7 @@ class SchemeDirectivesExamplesSpec extends RoutingSpec {
val route =
scheme("http") {
- extract(_.request.uri) { uri ⇒
+ extract(_.request.uri) { uri =>
redirect(uri.copy(scheme = "https"), MovedPermanently)
}
} ~
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/TimeoutDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/TimeoutDirectivesExamplesSpec.scala
index 5536e96d99..f0bda7181a 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/TimeoutDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/TimeoutDirectivesExamplesSpec.scala
@@ -39,7 +39,8 @@ class TimeoutDirectivesExamplesSpec extends RoutingSpec with CompileOnlySpec {
"allow mapping the response while setting the timeout" in compileOnlySpec {
//#withRequestTimeout-with-handler
- val timeoutResponse = HttpResponse(StatusCodes.EnhanceYourCalm,
+ val timeoutResponse = HttpResponse(
+ StatusCodes.EnhanceYourCalm,
entity = "Unable to serve response within time limit, please enhance your calm.")
val route =
@@ -57,7 +58,8 @@ class TimeoutDirectivesExamplesSpec extends RoutingSpec with CompileOnlySpec {
pending // compile only spec since it requires an actual Http server to be run
//#withRequestTimeoutResponse
- val timeoutResponse = HttpResponse(StatusCodes.EnhanceYourCalm,
+ val timeoutResponse = HttpResponse(
+ StatusCodes.EnhanceYourCalm,
entity = "Unable to serve response within time limit, please enhance your calm.")
val route =
diff --git a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/WebSocketDirectivesExamplesSpec.scala b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/WebSocketDirectivesExamplesSpec.scala
index cb5912e855..3f8aabdc54 100644
--- a/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/WebSocketDirectivesExamplesSpec.scala
+++ b/akka-docs/rst/scala/code/docs/http/scaladsl/server/directives/WebSocketDirectivesExamplesSpec.scala
@@ -19,9 +19,9 @@ class WebSocketDirectivesExamplesSpec extends RoutingSpec {
"greeter-service" in {
def greeter: Flow[Message, Message, Any] =
Flow[Message].mapConcat {
- case tm: TextMessage ⇒
+ case tm: TextMessage =>
TextMessage(Source.single("Hello ") ++ tm.textStream ++ Source.single("!")) :: Nil
- case bm: BinaryMessage ⇒
+ case bm: BinaryMessage =>
// ignore binary messages but drain content to avoid the stream being clogged
bm.dataStream.runWith(Sink.ignore)
Nil
@@ -59,9 +59,9 @@ class WebSocketDirectivesExamplesSpec extends RoutingSpec {
"handle-multiple-protocols" in {
def greeterService: Flow[Message, Message, Any] =
Flow[Message].mapConcat {
- case tm: TextMessage ⇒
+ case tm: TextMessage =>
TextMessage(Source.single("Hello ") ++ tm.textStream ++ Source.single("!")) :: Nil
- case bm: BinaryMessage ⇒
+ case bm: BinaryMessage =>
// ignore binary messages but drain content to avoid the stream being clogged
bm.dataStream.runWith(Sink.ignore)
Nil
@@ -85,7 +85,7 @@ class WebSocketDirectivesExamplesSpec extends RoutingSpec {
WS("/services", wsClient.flow, List("other", "echo")) ~>
websocketMultipleProtocolRoute ~>
check {
- expectWebSocketUpgradeWithProtocol { protocol ⇒
+ expectWebSocketUpgradeWithProtocol { protocol =>
protocol shouldEqual "echo"
wsClient.sendMessage("Peter")
diff --git a/akka-docs/rst/scala/code/docs/pattern/BackoffSupervisorDocSpec.scala b/akka-docs/rst/scala/code/docs/pattern/BackoffSupervisorDocSpec.scala
index 366ce8bc4f..3d747f387b 100644
--- a/akka-docs/rst/scala/code/docs/pattern/BackoffSupervisorDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/pattern/BackoffSupervisorDocSpec.scala
@@ -24,7 +24,7 @@ class BackoffSupervisorDocSpec {
minBackoff = 3.seconds,
maxBackoff = 30.seconds,
randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
- ))
+ ))
system.actorOf(supervisor, name = "echoSupervisor")
//#backoff-stop
@@ -44,7 +44,7 @@ class BackoffSupervisorDocSpec {
minBackoff = 3.seconds,
maxBackoff = 30.seconds,
randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
- ))
+ ))
system.actorOf(supervisor, name = "echoSupervisor")
//#backoff-fail
@@ -59,14 +59,14 @@ class BackoffSupervisorDocSpec {
//#backoff-custom-stop
val supervisor = BackoffSupervisor.props(
Backoff.onStop(
- childProps,
- childName = "myEcho",
- minBackoff = 3.seconds,
- maxBackoff = 30.seconds,
- randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
- ).withManualReset // the child must send BackoffSupervisor.Reset to its parent
- .withDefaultStoppingStrategy // Stop at any Exception thrown
- )
+ childProps,
+ childName = "myEcho",
+ minBackoff = 3.seconds,
+ maxBackoff = 30.seconds,
+ randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
+ ).withManualReset // the child must send BackoffSupervisor.Reset to its parent
+ .withDefaultStoppingStrategy // Stop at any Exception thrown
+ )
//#backoff-custom-stop
system.actorOf(supervisor, name = "echoSupervisor")
@@ -86,11 +86,11 @@ class BackoffSupervisorDocSpec {
minBackoff = 3.seconds,
maxBackoff = 30.seconds,
randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly
- ).withAutoReset(10.seconds) // the child must send BackoffSupervisor.Reset to its parent
+ ).withAutoReset(10.seconds) // reset if the child does not throw any errors within 10 seconds
.withSupervisorStrategy(
OneForOneStrategy() {
- case _: MyException ⇒ SupervisorStrategy.Restart
- case _ ⇒ SupervisorStrategy.Escalate
+ case _: MyException => SupervisorStrategy.Restart
+ case _ => SupervisorStrategy.Escalate
}))
//#backoff-custom-fail
diff --git a/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala b/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala
index 15b377c32e..caac3a3012 100644
--- a/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala
+++ b/akka-docs/rst/scala/code/docs/pattern/SchedulerPatternSpec.scala
@@ -88,12 +88,14 @@ class SchedulerPatternSpec extends AkkaSpec {
}
"send periodic ticks from the constructor" taggedAs TimingTest in {
- testSchedule(system.actorOf(Props(classOf[ScheduleInConstructor], testActor)),
+ testSchedule(
+ system.actorOf(Props(classOf[ScheduleInConstructor], testActor)),
3000 millis, 2000 millis)
}
"send ticks from the preStart and receive" taggedAs TimingTest in {
- testSchedule(system.actorOf(Props(classOf[ScheduleInConstructor], testActor)),
+ testSchedule(
+ system.actorOf(Props(classOf[ScheduleInConstructor], testActor)),
3000 millis, 2500 millis)
}
}
diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala
index 6eba1805d1..19d2c3946c 100644
--- a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala
@@ -326,13 +326,13 @@ object PersistenceDocSpec {
override def receiveCommand: Receive = {
case c: String =>
sender() ! c
- persistAsync(c + "-outer-1") { outer ⇒
+ persistAsync(c + "-outer-1") { outer =>
sender() ! outer
- persistAsync(c + "-inner-1") { inner ⇒ sender() ! inner }
+ persistAsync(c + "-inner-1") { inner => sender() ! inner }
}
- persistAsync(c + "-outer-2") { outer ⇒
+ persistAsync(c + "-outer-2") { outer =>
sender() ! outer
- persistAsync(c + "-inner-2") { inner ⇒ sender() ! inner }
+ persistAsync(c + "-inner-2") { inner => sender() ! inner }
}
}
//#nested-persistAsync-persistAsync
diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala
index 1097121ac7..9bd651a58c 100644
--- a/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/persistence/PersistencePluginDocSpec.scala
@@ -149,17 +149,19 @@ class MyJournal extends AsyncWriteJournal {
def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = ???
def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long,
toSequenceNr: Long, max: Long)(
- replayCallback: (PersistentRepr) => Unit): Future[Unit] = ???
- def asyncReadHighestSequenceNr(persistenceId: String,
- fromSequenceNr: Long): Future[Long] = ???
+ replayCallback: (PersistentRepr) => Unit): Future[Unit] = ???
+ def asyncReadHighestSequenceNr(
+ persistenceId: String,
+ fromSequenceNr: Long): Future[Long] = ???
// optionally override:
override def receivePluginInternal: Receive = super.receivePluginInternal
}
class MySnapshotStore extends SnapshotStore {
- def loadAsync(persistenceId: String,
- criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = ???
+ def loadAsync(
+ persistenceId: String,
+ criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = ???
def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = ???
def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = ???
def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = ???
diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala
index 73ae91c6a3..1eea46f24b 100644
--- a/akka-docs/rst/scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala
@@ -247,7 +247,8 @@ class UserEventsAdapter extends EventAdapter {
case UserDetailsChanged(null, address) => EventSeq(UserAddressChanged(address))
case UserDetailsChanged(name, null) => EventSeq(UserNameChanged(name))
case UserDetailsChanged(name, address) =>
- EventSeq(UserNameChanged(name),
+ EventSeq(
+ UserNameChanged(name),
UserAddressChanged(address))
case event: V2 => EventSeq(event)
}
@@ -267,7 +268,7 @@ class RemovedEventsAwareSerializer extends SerializerWithStringManifest {
val SkipEventManifestsEvents = Set(
"docs.persistence.CustomerBlinked" // ...
- )
+ )
override def manifest(o: AnyRef): String = o.getClass.getName
diff --git a/akka-docs/rst/scala/code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala
index 6634ef4941..35206b7cd8 100644
--- a/akka-docs/rst/scala/code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala
@@ -22,13 +22,13 @@ object LeveldbPersistenceQueryDocSpec {
class MyTaggingEventAdapter extends WriteEventAdapter {
val colors = Set("green", "black", "blue")
override def toJournal(event: Any): Any = event match {
- case s: String ⇒
- var tags = colors.foldLeft(Set.empty[String]) { (acc, c) ⇒
+ case s: String =>
+ var tags = colors.foldLeft(Set.empty[String]) { (acc, c) =>
if (s.contains(c)) acc + c else acc
}
if (tags.isEmpty) event
else Tagged(event, tags)
- case _ ⇒ event
+ case _ => event
}
override def manifest(event: Any): String = ""
diff --git a/akka-docs/rst/scala/code/docs/persistence/query/MyEventsByTagPublisher.scala b/akka-docs/rst/scala/code/docs/persistence/query/MyEventsByTagPublisher.scala
index f5aaa30e96..a0f972e95c 100644
--- a/akka-docs/rst/scala/code/docs/persistence/query/MyEventsByTagPublisher.scala
+++ b/akka-docs/rst/scala/code/docs/persistence/query/MyEventsByTagPublisher.scala
@@ -39,11 +39,11 @@ class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteD
}
def receive = {
- case _: Request | Continue ⇒
+ case _: Request | Continue =>
query()
deliverBuf()
- case Cancel ⇒
+ case Cancel =>
context.stop(self)
}
@@ -79,12 +79,12 @@ class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteD
val serialization = SerializationExtension(context.system)
buf = result.map {
- case (id, bytes) ⇒
+ case (id, bytes) =>
val p = serialization.deserialize(bytes, classOf[PersistentRepr]).get
EventEnvelope(offset = id, p.persistenceId, p.sequenceNr, p.payload)
}
} catch {
- case e: Exception ⇒
+ case e: Exception =>
onErrorThenStop(e)
}
}
@@ -101,4 +101,4 @@ class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteD
}
}
}
-//#events-by-tag-publisher
\ No newline at end of file
+//#events-by-tag-publisher
diff --git a/akka-docs/rst/scala/code/docs/persistence/query/PersistenceQueryDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/query/PersistenceQueryDocSpec.scala
index 9d7084a097..bc915b886d 100644
--- a/akka-docs/rst/scala/code/docs/persistence/query/PersistenceQueryDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/persistence/query/PersistenceQueryDocSpec.scala
@@ -57,7 +57,7 @@ object PersistenceQueryDocSpec {
tag: String, offset: Long = 0L): Source[EventEnvelope, NotUsed] = {
val props = MyEventsByTagPublisher.props(tag, offset, refreshInterval)
Source.actorPublisher[EventEnvelope](props)
- .mapMaterializedValue(_ ⇒ NotUsed)
+ .mapMaterializedValue(_ => NotUsed)
}
override def eventsByPersistenceId(
diff --git a/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala b/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala
index 9442abe106..a9245e4e1a 100644
--- a/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/routing/CustomRouterDocSpec.scala
@@ -124,7 +124,8 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl
val paths = for (n <- 1 to 10) yield ("/user/s" + n)
val redundancy1: ActorRef =
- system.actorOf(RedundancyGroup(paths, nbrCopies = 3).props(),
+ system.actorOf(
+ RedundancyGroup(paths, nbrCopies = 3).props(),
name = "redundancy1")
redundancy1 ! "important"
//#usage-1
@@ -132,7 +133,8 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl
for (_ <- 1 to 3) expectMsg("important")
//#usage-2
- val redundancy2: ActorRef = system.actorOf(FromConfig.props(),
+ val redundancy2: ActorRef = system.actorOf(
+ FromConfig.props(),
name = "redundancy2")
redundancy2 ! "very important"
//#usage-2
diff --git a/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala b/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala
index a53740eb1c..29b13a85ba 100644
--- a/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/routing/RouterDocSpec.scala
@@ -415,7 +415,8 @@ router-dispatcher {}
//#scatter-gather-group-2
val router20: ActorRef =
- context.actorOf(ScatterGatherFirstCompletedGroup(paths,
+ context.actorOf(ScatterGatherFirstCompletedGroup(
+ paths,
within = 10.seconds).props(), "router20")
//#scatter-gather-group-2
@@ -437,7 +438,8 @@ router-dispatcher {}
//#tail-chopping-group-2
val router24: ActorRef =
- context.actorOf(TailChoppingGroup(paths,
+ context.actorOf(TailChoppingGroup(
+ paths,
within = 10.seconds, interval = 20.millis).props(), "router24")
//#tail-chopping-group-2
@@ -448,7 +450,8 @@ router-dispatcher {}
//#consistent-hashing-pool-2
val router26: ActorRef =
- context.actorOf(ConsistentHashingPool(5).props(Props[Worker]),
+ context.actorOf(
+ ConsistentHashingPool(5).props(Props[Worker]),
"router26")
//#consistent-hashing-pool-2
@@ -470,7 +473,8 @@ router-dispatcher {}
//#resize-pool-2
val resizer = DefaultResizer(lowerBound = 2, upperBound = 15)
val router30: ActorRef =
- context.actorOf(RoundRobinPool(5, Some(resizer)).props(Props[Worker]),
+ context.actorOf(
+ RoundRobinPool(5, Some(resizer)).props(Props[Worker]),
"router30")
//#resize-pool-2
diff --git a/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala
index 5a144dc719..f810790756 100644
--- a/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala
@@ -38,8 +38,9 @@ package docs.serialization {
// "fromBinary" deserializes the given array,
// using the type hint (if any, see "includeManifest" above)
- def fromBinary(bytes: Array[Byte],
- clazz: Option[Class[_]]): AnyRef = {
+ def fromBinary(
+ bytes: Array[Byte],
+ clazz: Option[Class[_]]): AnyRef = {
// Put your code that deserializes here
//#...
null
diff --git a/akka-docs/rst/scala/code/docs/stream/CompositionDocSpec.scala b/akka-docs/rst/scala/code/docs/stream/CompositionDocSpec.scala
index 645409110a..6e0363fc93 100644
--- a/akka-docs/rst/scala/code/docs/stream/CompositionDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/stream/CompositionDocSpec.scala
@@ -216,8 +216,9 @@ class CompositionDocSpec extends AkkaSpec {
def close() = p.trySuccess(None)
}
- def f(p: Promise[Option[Int]],
- rest: (Future[OutgoingConnection], Future[String])): Future[MyClass] = {
+ def f(
+ p: Promise[Option[Int]],
+ rest: (Future[OutgoingConnection], Future[String])): Future[MyClass] = {
val connFuture = rest._1
connFuture.map(MyClass(p, _))
diff --git a/akka-docs/rst/scala/code/docs/stream/FlowDocSpec.scala b/akka-docs/rst/scala/code/docs/stream/FlowDocSpec.scala
index ad1e135555..a54f63d644 100644
--- a/akka-docs/rst/scala/code/docs/stream/FlowDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/stream/FlowDocSpec.scala
@@ -149,12 +149,11 @@ class FlowDocSpec extends AkkaSpec {
"various ways of transforming materialized values" in {
import scala.concurrent.duration._
- val throttler = Flow.fromGraph(GraphDSL.create(Source.tick(1.second, 1.second, "test")) { implicit builder =>
- tickSource =>
- import GraphDSL.Implicits._
- val zip = builder.add(ZipWith[String, Int, Int](Keep.right))
- tickSource ~> zip.in0
- FlowShape(zip.in1, zip.out)
+ val throttler = Flow.fromGraph(GraphDSL.create(Source.tick(1.second, 1.second, "test")) { implicit builder => tickSource =>
+ import GraphDSL.Implicits._
+ val zip = builder.add(ZipWith[String, Int, Int](Keep.right))
+ tickSource ~> zip.in0
+ FlowShape(zip.in1, zip.out)
})
//#flow-mat-combine
@@ -212,11 +211,10 @@ class FlowDocSpec extends AkkaSpec {
// The result of r11 can be also achieved by using the Graph API
val r12: RunnableGraph[(Promise[Option[Int]], Cancellable, Future[Int])] =
- RunnableGraph.fromGraph(GraphDSL.create(source, flow, sink)((_, _, _)) { implicit builder =>
- (src, f, dst) =>
- import GraphDSL.Implicits._
- src ~> f ~> dst
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(source, flow, sink)((_, _, _)) { implicit builder => (src, f, dst) =>
+ import GraphDSL.Implicits._
+ src ~> f ~> dst
+ ClosedShape
})
//#flow-mat-combine
diff --git a/akka-docs/rst/scala/code/docs/stream/GraphDSLDocSpec.scala b/akka-docs/rst/scala/code/docs/stream/GraphDSLDocSpec.scala
index 6aa668352b..40e91146bf 100644
--- a/akka-docs/rst/scala/code/docs/stream/GraphDSLDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/stream/GraphDSLDocSpec.scala
@@ -97,9 +97,9 @@ class GraphDSLDocSpec extends AkkaSpec {
// A shape represents the input and output ports of a reusable
// processing module
case class PriorityWorkerPoolShape[In, Out](
- jobsIn: Inlet[In],
+ jobsIn: Inlet[In],
priorityJobsIn: Inlet[In],
- resultsOut: Outlet[Out]) extends Shape {
+ resultsOut: Outlet[Out]) extends Shape {
// It is important to provide the list of all input and output
// ports with a stable order. Duplicates are not allowed.
@@ -117,7 +117,7 @@ class GraphDSLDocSpec extends AkkaSpec {
// A Shape must also be able to create itself from existing ports
override def copyFromPorts(
- inlets: immutable.Seq[Inlet[_]],
+ inlets: immutable.Seq[Inlet[_]],
outlets: immutable.Seq[Outlet[_]]) = {
assert(inlets.size == this.inlets.size)
assert(outlets.size == this.outlets.size)
@@ -130,10 +130,10 @@ class GraphDSLDocSpec extends AkkaSpec {
//#graph-dsl-components-create
object PriorityWorkerPool {
def apply[In, Out](
- worker: Flow[In, Out, Any],
+ worker: Flow[In, Out, Any],
workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = {
- GraphDSL.create() { implicit b ⇒
+ GraphDSL.create() { implicit b =>
import GraphDSL.Implicits._
val priorityMerge = b.add(MergePreferred[In](1))
@@ -203,10 +203,8 @@ class GraphDSLDocSpec extends AkkaSpec {
"access to materialized value" in {
//#graph-dsl-matvalue
import GraphDSL.Implicits._
- val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) {
- implicit builder ⇒
- fold ⇒
- FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet)
+ val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) { implicit builder => fold =>
+ FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet)
})
//#graph-dsl-matvalue
@@ -215,16 +213,14 @@ class GraphDSLDocSpec extends AkkaSpec {
//#graph-dsl-matvalue-cycle
import GraphDSL.Implicits._
// This cannot produce any value:
- val cyclicFold: Source[Int, Future[Int]] = Source.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) {
- implicit builder =>
- fold =>
- // - Fold cannot complete until its upstream mapAsync completes
- // - mapAsync cannot complete until the materialized Future produced by
- // fold completes
- // As a result this Source will never emit anything, and its materialited
- // Future will never complete
- builder.materializedValue.mapAsync(4)(identity) ~> fold
- SourceShape(builder.materializedValue.mapAsync(4)(identity).outlet)
+ val cyclicFold: Source[Int, Future[Int]] = Source.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) { implicit builder => fold =>
+ // - Fold cannot complete until its upstream mapAsync completes
+ // - mapAsync cannot complete until the materialized Future produced by
+ // fold completes
+      // As a result this Source will never emit anything, and its materialized
+ // Future will never complete
+ builder.materializedValue.mapAsync(4)(identity) ~> fold
+ SourceShape(builder.materializedValue.mapAsync(4)(identity).outlet)
})
//#graph-dsl-matvalue-cycle
}
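
A hypothetical use of the foldFlow defined above, not part of the patch: because the flow's outlet is fed from its own materialized Future, it emits exactly one element (the final fold result) once its upstream completes. Assuming the spec's imports and an implicit materializer:

    // emits 5050 after the range completes; Sink.head picks up that single element
    val total: Future[Int] = Source(1 to 100).via(foldFlow).runWith(Sink.head)
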
diff --git a/akka-docs/rst/scala/code/docs/stream/StreamPartialGraphDSLDocSpec.scala b/akka-docs/rst/scala/code/docs/stream/StreamPartialGraphDSLDocSpec.scala
index ba18bcaced..19a1806d53 100644
--- a/akka-docs/rst/scala/code/docs/stream/StreamPartialGraphDSLDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/stream/StreamPartialGraphDSLDocSpec.scala
@@ -31,18 +31,17 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec {
val resultSink = Sink.head[Int]
- val g = RunnableGraph.fromGraph(GraphDSL.create(resultSink) { implicit b =>
- sink =>
- import GraphDSL.Implicits._
+ val g = RunnableGraph.fromGraph(GraphDSL.create(resultSink) { implicit b => sink =>
+ import GraphDSL.Implicits._
- // importing the partial graph will return its shape (inlets & outlets)
- val pm3 = b.add(pickMaxOfThree)
+ // importing the partial graph will return its shape (inlets & outlets)
+ val pm3 = b.add(pickMaxOfThree)
- Source.single(1) ~> pm3.in(0)
- Source.single(2) ~> pm3.in(1)
- Source.single(3) ~> pm3.in(2)
- pm3.out ~> sink.in
- ClosedShape
+ Source.single(1) ~> pm3.in(0)
+ Source.single(2) ~> pm3.in(1)
+ Source.single(3) ~> pm3.in(2)
+ pm3.out ~> sink.in
+ ClosedShape
})
val max: Future[Int] = g.run()
diff --git a/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeDroppyBroadcast.scala b/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeDroppyBroadcast.scala
index 704309629d..72cf648c2e 100644
--- a/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeDroppyBroadcast.scala
+++ b/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeDroppyBroadcast.scala
@@ -23,17 +23,16 @@ class RecipeDroppyBroadcast extends RecipeSpec {
val mySink3 = Sink.fromSubscriber(sub3)
//#droppy-bcast
- val graph = RunnableGraph.fromGraph(GraphDSL.create(mySink1, mySink2, mySink3)((_, _, _)) { implicit b =>
- (sink1, sink2, sink3) =>
- import GraphDSL.Implicits._
+ val graph = RunnableGraph.fromGraph(GraphDSL.create(mySink1, mySink2, mySink3)((_, _, _)) { implicit b => (sink1, sink2, sink3) =>
+ import GraphDSL.Implicits._
- val bcast = b.add(Broadcast[Int](3))
- myElements ~> bcast
+ val bcast = b.add(Broadcast[Int](3))
+ myElements ~> bcast
- bcast.buffer(10, OverflowStrategy.dropHead) ~> sink1
- bcast.buffer(10, OverflowStrategy.dropHead) ~> sink2
- bcast.buffer(10, OverflowStrategy.dropHead) ~> sink3
- ClosedShape
+ bcast.buffer(10, OverflowStrategy.dropHead) ~> sink1
+ bcast.buffer(10, OverflowStrategy.dropHead) ~> sink2
+ bcast.buffer(10, OverflowStrategy.dropHead) ~> sink3
+ ClosedShape
})
//#droppy-bcast
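
The recipe above isolates slow subscribers by giving every broadcast branch its own dropping buffer. The operator in isolation, as a sketch with an assumed buffer size:

    import akka.NotUsed
    import akka.stream.OverflowStrategy
    import akka.stream.scaladsl.Flow

    // a 10-element buffer that discards the oldest element instead of backpressuring upstream
    val droppy: Flow[Int, Int, NotUsed] = Flow[Int].buffer(10, OverflowStrategy.dropHead)
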
diff --git a/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeReduceByKey.scala b/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeReduceByKey.scala
index 15c07825e9..908dd7d797 100644
--- a/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeReduceByKey.scala
+++ b/akka-docs/rst/scala/code/docs/stream/cookbook/RecipeReduceByKey.scala
@@ -45,8 +45,8 @@ class RecipeReduceByKey extends RecipeSpec {
//#reduce-by-key-general
def reduceByKey[In, K, Out](
maximumGroupSize: Int,
- groupKey: (In) => K,
- map: (In) => Out)(reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {
+ groupKey: (In) => K,
+ map: (In) => Out)(reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = {
Flow[In]
.groupBy[K](maximumGroupSize, groupKey)
@@ -56,7 +56,8 @@ class RecipeReduceByKey extends RecipeSpec {
}
val wordCounts = words.via(
- reduceByKey(MaximumDistinctWords,
+ reduceByKey(
+ MaximumDistinctWords,
groupKey = (word: String) => word,
map = (word: String) => 1)((left: Int, right: Int) => left + right))
//#reduce-by-key-general
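
reduceByKey above generalizes beyond counting occurrences. A hypothetical second instantiation, reusing the spec's words source and MaximumDistinctWords, that sums character counts per word instead:

    val charCounts = words.via(
      reduceByKey(
        MaximumDistinctWords,
        groupKey = (word: String) => word,
        map = (word: String) => word.length)((left: Int, right: Int) => left + right))
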
diff --git a/akka-docs/rst/scala/code/docs/stream/io/StreamTcpDocSpec.scala b/akka-docs/rst/scala/code/docs/stream/io/StreamTcpDocSpec.scala
index 790e2377dd..366a21c69a 100644
--- a/akka-docs/rst/scala/code/docs/stream/io/StreamTcpDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/stream/io/StreamTcpDocSpec.scala
@@ -85,7 +85,7 @@ class StreamTcpDocSpec extends AkkaSpec {
allowTruncation = true))
.map(_.utf8String)
//#welcome-banner-chat-server
- .map { command ⇒ serverProbe.ref ! command; command }
+ .map { command => serverProbe.ref ! command; command }
//#welcome-banner-chat-server
.via(commandParser)
// merge in the initial banner after parser
@@ -102,8 +102,8 @@ class StreamTcpDocSpec extends AkkaSpec {
val input = new AtomicReference("Hello world" :: "What a lovely day" :: Nil)
def readLine(prompt: String): String = {
input.get() match {
- case all @ cmd :: tail if input.compareAndSet(all, tail) ⇒ cmd
- case _ ⇒ "q"
+ case all @ cmd :: tail if input.compareAndSet(all, tail) => cmd
+ case _ => "q"
}
}
diff --git a/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala b/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala
index c8124fc1aa..3cae559c51 100644
--- a/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala
+++ b/akka-docs/rst/scala/code/docs/testkit/TestKitUsageSpec.scala
@@ -26,7 +26,8 @@ import scala.collection.immutable
* a Test to show some TestKit examples
*/
class TestKitUsageSpec
- extends TestKit(ActorSystem("TestKitUsageSpec",
+ extends TestKit(ActorSystem(
+ "TestKitUsageSpec",
ConfigFactory.parseString(TestKitUsageSpec.config)))
with DefaultTimeout with ImplicitSender
with WordSpecLike with Matchers with BeforeAndAfterAll {
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala
index c0c2779c06..3b61836874 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala
@@ -50,9 +50,10 @@ private[http] object OutgoingConnectionBlueprint {
| Merge |<------------------------------------------ V
+------------+
*/
- def apply(hostHeader: headers.Host,
- settings: ClientConnectionSettings,
- log: LoggingAdapter): Http.ClientLayer = {
+ def apply(
+ hostHeader: headers.Host,
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter): Http.ClientLayer = {
import settings._
val core = BidiFlow.fromGraph(GraphDSL.create() { implicit b ⇒
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala
index be4ccb15ca..98f7ab171c 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala
@@ -21,9 +21,9 @@ private object PoolConductor {
import PoolSlot.{ RawSlotEvent, SlotEvent }
case class Ports(
- requestIn: Inlet[RequestContext],
+ requestIn: Inlet[RequestContext],
slotEventIn: Inlet[RawSlotEvent],
- slotOuts: immutable.Seq[Outlet[RequestContext]]) extends Shape {
+ slotOuts: immutable.Seq[Outlet[RequestContext]]) extends Shape {
override val inlets = requestIn :: slotEventIn :: Nil
override def outlets = slotOuts
@@ -194,7 +194,7 @@ private object PoolConductor {
@tailrec def bestSlot(ix: Int = 0, bestIx: Int = -1, bestState: SlotState = Busy): Int =
if (ix < slotStates.length) {
val pl = pipeliningLimit
- slotStates(ix) -> bestState match {
+ slotStates(ix) → bestState match {
case (Idle, _) ⇒ ix
case (Unconnected, Loaded(_) | Busy) ⇒ bestSlot(ix + 1, ix, Unconnected)
case (x @ Loaded(a), Loaded(b)) if a < b ⇒ bestSlot(ix + 1, ix, x)
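
bestSlot above is a tail-recursive scan that keeps the most attractive slot seen so far: Idle beats Unconnected, which beats the least-loaded slot. The same scanning pattern as a simplified standalone sketch; the state ADT is assumed here and the pipelining-limit check is dropped:

    import scala.annotation.tailrec

    sealed trait SlotState
    case object Idle extends SlotState
    case object Unconnected extends SlotState
    final case class Loaded(openRequests: Int) extends SlotState
    case object Busy extends SlotState

    // index of the most attractive slot, or -1 when every slot is Busy
    @tailrec def bestSlot(states: Vector[SlotState], ix: Int = 0, bestIx: Int = -1, best: SlotState = Busy): Int =
      if (ix == states.length) bestIx
      else (states(ix), best) match {
        case (Idle, _)                           => ix // an idle slot wins immediately
        case (Unconnected, Loaded(_) | Busy)     => bestSlot(states, ix + 1, ix, Unconnected)
        case (x @ Loaded(a), Loaded(b)) if a < b => bestSlot(states, ix + 1, ix, x)
        case (x @ Loaded(_), Busy)               => bestSlot(states, ix + 1, ix, x)
        case _                                   => bestSlot(states, ix + 1, bestIx, best)
      }

    // bestSlot(Vector(Busy, Loaded(2), Unconnected)) == 2
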
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala
index 1bf95ebb42..56799a1a98 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala
@@ -67,9 +67,11 @@ private object PoolFlow {
- Simple merge of the Connection Slots' outputs
*/
- def apply(connectionFlow: Flow[HttpRequest, HttpResponse, Future[Http.OutgoingConnection]],
- settings: ConnectionPoolSettings, log: LoggingAdapter)(
- implicit system: ActorSystem, fm: Materializer): Flow[RequestContext, ResponseContext, NotUsed] =
+ def apply(
+ connectionFlow: Flow[HttpRequest, HttpResponse, Future[Http.OutgoingConnection]],
+ settings: ConnectionPoolSettings, log: LoggingAdapter)(
+ implicit
+ system: ActorSystem, fm: Materializer): Flow[RequestContext, ResponseContext, NotUsed] =
Flow.fromGraph(GraphDSL.create[FlowShape[RequestContext, ResponseContext]]() { implicit b ⇒
import settings._
import GraphDSL.Implicits._
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolMasterActor.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolMasterActor.scala
index e9de06d473..44b4de5859 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolMasterActor.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolMasterActor.scala
@@ -53,8 +53,8 @@ private[http] final class PoolMasterActor extends Actor with ActorLogging {
}
val props = Props(new PoolInterfaceActor(gateway)).withDeploy(Deploy.local)
val ref = context.actorOf(props, PoolInterfaceActor.name.next())
- poolStatus += gateway -> PoolInterfaceRunning(ref)
- poolInterfaces += ref -> gateway
+ poolStatus += gateway → PoolInterfaceRunning(ref)
+ poolInterfaces += ref → gateway
context.watch(ref)
}
@@ -93,7 +93,7 @@ private[http] final class PoolMasterActor extends Actor with ActorLogging {
// to this actor by the pool actor, they will be retried once the shutdown
// has completed.
ref ! PoolInterfaceActor.Shutdown
- poolStatus += gateway -> PoolInterfaceShuttingDown(shutdownCompletedPromise)
+ poolStatus += gateway → PoolInterfaceShuttingDown(shutdownCompletedPromise)
case PoolInterfaceShuttingDown(formerPromise) ⇒
// Pool is already shutting down, mirror the existing promise.
shutdownCompletedPromise.tryCompleteWith(formerPromise.future)
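
Most of the churn in this file, and in many files below, is scalariform rewriting the ASCII -> into its Unicode alias →. Both operators come from scala.Predef.ArrowAssoc and build the same Tuple2, so the change is purely cosmetic:

    val a: (String, Int) = "x" -> 1
    val b: (String, Int) = "x" → 1 // same desugaring: ArrowAssoc("x").→(1)
    assert(a == b)
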
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala
index b712f17b6d..191ee07112 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala
@@ -48,7 +48,8 @@ private object PoolSlot {
v
*/
def apply(slotIx: Int, connectionFlow: Flow[HttpRequest, HttpResponse, Any],
- settings: ConnectionPoolSettings)(implicit system: ActorSystem,
+ settings: ConnectionPoolSettings)(implicit
+ system: ActorSystem,
fm: Materializer): Graph[FanOutShape2[RequestContext, ResponseContext, RawSlotEvent], Any] =
GraphDSL.create() { implicit b ⇒
import GraphDSL.Implicits._
@@ -57,7 +58,8 @@ private object PoolSlot {
val name = slotProcessorActorName.next()
val slotProcessor = b.add {
Flow.fromProcessor { () ⇒
- val actor = system.actorOf(Props(new SlotProcessor(slotIx, connectionFlow, settings)).withDeploy(Deploy.local),
+ val actor = system.actorOf(
+ Props(new SlotProcessor(slotIx, connectionFlow, settings)).withDeploy(Deploy.local),
name)
ActorProcessor[RequestContext, List[ProcessorOut]](actor)
}.mapConcat(ConstantFun.scalaIdentityFunction)
@@ -66,7 +68,8 @@ private object PoolSlot {
slotProcessor ~> split.in
- new FanOutShape2(slotProcessor.in,
+ new FanOutShape2(
+ slotProcessor.in,
split.out(0).collect { case ResponseDelivery(r) ⇒ r }.outlet,
split.out(1).collect { case r: RawSlotEvent ⇒ r }.outlet)
}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala
index 342e5183e8..19ac33da07 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala
@@ -25,17 +25,19 @@ import akka.stream.impl.fusing.SubSource
*
* see: http://tools.ietf.org/html/rfc2046#section-5.1.1
*/
-private[http] final class BodyPartParser(defaultContentType: ContentType,
- boundary: String,
- log: LoggingAdapter,
- settings: BodyPartParser.Settings)
+private[http] final class BodyPartParser(
+ defaultContentType: ContentType,
+ boundary: String,
+ log: LoggingAdapter,
+ settings: BodyPartParser.Settings)
extends GraphStage[FlowShape[ByteString, BodyPartParser.Output]] {
import BodyPartParser._
import settings._
require(boundary.nonEmpty, "'boundary' parameter of multipart Content-Type must be non-empty")
require(boundary.charAt(boundary.length - 1) != ' ', "'boundary' parameter of multipart Content-Type must not end with a space char")
- require(boundaryChar matchesAll boundary,
+ require(
+ boundaryChar matchesAll boundary,
s"'boundary' parameter of multipart Content-Type contains illegal character '${boundaryChar.firstMismatch(boundary).get}'")
sealed trait StateResult // phantom type for ensuring soundness of our parsing method setup
@@ -68,7 +70,7 @@ private[http] final class BodyPartParser(defaultContentType: ContentType,
override def createLogic(attributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) with InHandler with OutHandler {
private var output = collection.immutable.Queue.empty[Output] // FIXME this probably is too wasteful
- private var state: ByteString => StateResult = tryParseInitialBoundary
+ private var state: ByteString ⇒ StateResult = tryParseInitialBoundary
private var shouldTerminate = false
override def onPush(): Unit = {
@@ -76,8 +78,8 @@ private[http] final class BodyPartParser(defaultContentType: ContentType,
val elem = grab(in)
try state(elem)
catch {
- case e: ParsingException => fail(e.info)
- case NotEnoughDataException =>
+ case e: ParsingException ⇒ fail(e.info)
+ case NotEnoughDataException ⇒
// we are missing a try/catch{continue} wrapper somewhere
throw new IllegalStateException("unexpected NotEnoughDataException", NotEnoughDataException)
}
@@ -105,8 +107,8 @@ private[http] final class BodyPartParser(defaultContentType: ContentType,
if (illegalHeaderWarnings) log.warning(errorInfo.withSummaryPrepended("Illegal multipart header").formatPretty)
def tryParseInitialBoundary(input: ByteString): StateResult =
- // we don't use boyerMoore here because we are testing for the boundary *without* a
- // preceding CRLF and at a known location (the very beginning of the entity)
+ // we don't use boyerMoore here because we are testing for the boundary *without* a
+ // preceding CRLF and at a known location (the very beginning of the entity)
try {
if (boundary(input, 0)) {
val ix = boundaryLength
@@ -136,7 +138,7 @@ private[http] final class BodyPartParser(defaultContentType: ContentType,
def contentType =
cth match {
case Some(x) ⇒ x.contentType
- case None ⇒ defaultContentType
+ case None ⇒ defaultContentType
}
var lineEnd = 0
@@ -181,7 +183,8 @@ private[http] final class BodyPartParser(defaultContentType: ContentType,
def parseEntity(headers: List[HttpHeader], contentType: ContentType,
emitPartChunk: (List[HttpHeader], ContentType, ByteString) ⇒ Unit = {
(headers, ct, bytes) ⇒
- emit(BodyPartStart(headers, entityParts ⇒ HttpEntity.IndefiniteLength(ct,
+ emit(BodyPartStart(headers, entityParts ⇒ HttpEntity.IndefiniteLength(
+ ct,
entityParts.collect { case EntityPart(data) ⇒ data })))
emit(bytes)
},
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
index 8b0cd00339..fa4b23637a 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala
@@ -59,15 +59,15 @@ import akka.http.impl.model.parser.CharacterClasses._
 * cannot hold more than 255 items, so this array has a fixed size of 255.
*/
private[engine] final class HttpHeaderParser private (
- val settings: HttpHeaderParser.Settings,
- onIllegalHeader: ErrorInfo ⇒ Unit,
- private[this] var nodes: Array[Char] = new Array(512), // initial size, can grow as needed
- private[this] var nodeCount: Int = 0,
- private[this] var branchData: Array[Short] = new Array(254 * 3),
- private[this] var branchDataCount: Int = 0,
- private[this] var values: Array[AnyRef] = new Array(255), // fixed size of 255
- private[this] var valueCount: Int = 0,
- private[this] var trieIsPrivate: Boolean = false) { // signals the trie data can be mutated w/o having to copy first
+ val settings: HttpHeaderParser.Settings,
+ onIllegalHeader: ErrorInfo ⇒ Unit,
+ private[this] var nodes: Array[Char] = new Array(512), // initial size, can grow as needed
+ private[this] var nodeCount: Int = 0,
+ private[this] var branchData: Array[Short] = new Array(254 * 3),
+ private[this] var branchDataCount: Int = 0,
+ private[this] var values: Array[AnyRef] = new Array(255), // fixed size of 255
+ private[this] var valueCount: Int = 0,
+ private[this] var trieIsPrivate: Boolean = false) { // signals the trie data can be mutated w/o having to copy first
// TODO: evaluate whether switching to a value-class-based approach allows us to improve code readability without sacrificing performance
@@ -300,7 +300,7 @@ private[engine] final class HttpHeaderParser private (
val prefixedLines = lines.zipWithIndex map {
case (line, ix) ⇒ (if (ix < mainIx) p1 else if (ix > mainIx) p3 else p2) :: line
}
- prefixedLines -> mainIx
+ prefixedLines → mainIx
}
def branchLines(dataIx: Int, p1: String, p2: String, p3: String) = branchData(dataIx) match {
case 0 ⇒ Seq.empty
@@ -315,9 +315,9 @@ private[engine] final class HttpHeaderParser private (
case ValueBranch(_, valueParser, branchRootNodeIx, _) ⇒
val pad = " " * (valueParser.headerName.length + 3)
recurseAndPrefixLines(branchRootNodeIx, pad, "(" + valueParser.headerName + ")-", pad)
- case vp: HeaderValueParser ⇒ Seq(" (" :: vp.headerName :: ")" :: Nil) -> 0
- case value: RawHeader ⇒ Seq(" *" :: value.toString :: Nil) -> 0
- case value ⇒ Seq(" " :: value.toString :: Nil) -> 0
+ case vp: HeaderValueParser ⇒ Seq(" (" :: vp.headerName :: ")" :: Nil) → 0
+ case value: RawHeader ⇒ Seq(" *" :: value.toString :: Nil) → 0
+ case value ⇒ Seq(" " :: value.toString :: Nil) → 0
}
case nodeChar ⇒
val rix = rowIx(msb)
@@ -350,7 +350,7 @@ private[engine] final class HttpHeaderParser private (
node >>> 8 match {
case 0 ⇒ build(nodeIx + 1)
case msb if (node & 0xFF) == 0 ⇒ values(msb - 1) match {
- case ValueBranch(_, parser, _, count) ⇒ Map(parser.headerName -> count)
+ case ValueBranch(_, parser, _, count) ⇒ Map(parser.headerName → count)
case _ ⇒ Map.empty
}
case msb ⇒
@@ -482,7 +482,7 @@ private[http] object HttpHeaderParser {
onIllegalHeader(error.withSummaryPrepended(s"Illegal '$headerName' header"))
RawHeader(headerName, trimmedHeaderValue)
}
- header -> endIx
+ header → endIx
}
}
@@ -490,7 +490,7 @@ private[http] object HttpHeaderParser {
extends HeaderValueParser(headerName, maxValueCount) {
def apply(hhp: HttpHeaderParser, input: ByteString, valueStart: Int, onIllegalHeader: ErrorInfo ⇒ Unit): (HttpHeader, Int) = {
val (headerValue, endIx) = scanHeaderValue(hhp, input, valueStart, valueStart + maxHeaderValueLength + 2)()
- RawHeader(headerName, headerValue.trim) -> endIx
+ RawHeader(headerName, headerValue.trim) → endIx
}
}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala
index 831bec2474..781873cdfb 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala
@@ -19,13 +19,14 @@ import akka.http.scaladsl.model._
import headers._
import HttpProtocols._
import ParserOutput._
-import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
+import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
/**
* INTERNAL API
*/
-private[http] abstract class HttpMessageParser[Output >: MessageOutput <: ParserOutput](val settings: ParserSettings,
- val headerParser: HttpHeaderParser) { self ⇒
+private[http] abstract class HttpMessageParser[Output >: MessageOutput <: ParserOutput](
+ val settings: ParserSettings,
+ val headerParser: HttpHeaderParser) { self ⇒
import HttpMessageParser._
import settings._
@@ -191,8 +192,9 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser
clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult
- def parseFixedLengthBody(remainingBodyBytes: Long,
- isLastMessage: Boolean)(input: ByteString, bodyStart: Int): StateResult = {
+ def parseFixedLengthBody(
+ remainingBodyBytes: Long,
+ isLastMessage: Boolean)(input: ByteString, bodyStart: Int): StateResult = {
val remainingInputBytes = input.length - bodyStart
if (remainingInputBytes > 0) {
if (remainingInputBytes < remainingBodyBytes) {
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
index 4ca75b3922..3fe26250ac 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala
@@ -18,9 +18,10 @@ import ParserOutput._
/**
* INTERNAL API
*/
-private[http] class HttpRequestParser(_settings: ParserSettings,
- rawRequestUriHeader: Boolean,
- _headerParser: HttpHeaderParser)
+private[http] class HttpRequestParser(
+ _settings: ParserSettings,
+ rawRequestUriHeader: Boolean,
+ _headerParser: HttpHeaderParser)
extends HttpMessageParser[RequestOutput](_settings, _headerParser) {
import HttpMessageParser._
import settings._
@@ -54,7 +55,8 @@ private[http] class HttpRequestParser(_settings: ParserSettings,
}
case c ⇒ parseCustomMethod(ix + 1, sb.append(c))
}
- } else throw new ParsingException(BadRequest,
+ } else throw new ParsingException(
+ BadRequest,
ErrorInfo("Unsupported HTTP method", s"HTTP method too long (started with '${sb.toString}'). " +
"Increase `akka.http.server.parsing.max-method-length` to support HTTP methods with more characters."))
@@ -93,7 +95,8 @@ private[http] class HttpRequestParser(_settings: ParserSettings,
if (ix == input.length) throw NotEnoughDataException
else if (CharacterClasses.WSPCRLF(input(ix).toChar)) ix
else if (ix < uriEndLimit) findUriEnd(ix + 1)
- else throw new ParsingException(RequestUriTooLong,
+ else throw new ParsingException(
+ RequestUriTooLong,
s"URI length exceeds the configured limit of $maxUriLength characters")
val uriEnd = findUriEnd()
@@ -113,8 +116,9 @@ private[http] class HttpRequestParser(_settings: ParserSettings,
clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult =
if (hostHeaderPresent || protocol == HttpProtocols.`HTTP/1.0`) {
- def emitRequestStart(createEntity: EntityCreator[RequestOutput, RequestEntity],
- headers: List[HttpHeader] = headers) = {
+ def emitRequestStart(
+ createEntity: EntityCreator[RequestOutput, RequestEntity],
+ headers: List[HttpHeader] = headers) = {
val allHeaders0 =
if (rawRequestUriHeader) `Raw-Request-URI`(new String(uriBytes, HttpCharsets.`US-ASCII`.nioCharset)) :: headers
else headers
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala
index cf0c829c64..2d65fe6c2d 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala
@@ -88,8 +88,9 @@ private[http] class HttpResponseParser(_settings: ParserSettings, _headerParser:
clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`],
expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult = {
- def emitResponseStart(createEntity: EntityCreator[ResponseOutput, ResponseEntity],
- headers: List[HttpHeader] = headers) = {
+ def emitResponseStart(
+ createEntity: EntityCreator[ResponseOutput, ResponseEntity],
+ headers: List[HttpHeader] = headers) = {
val close =
contextForCurrentResponse.get.oneHundredContinueTrigger match {
case None ⇒ closeAfterResponseCompletion
@@ -175,8 +176,9 @@ private[http] object HttpResponseParser {
* a promise whose completion either triggers the sending of the (suspended)
* request entity or the closing of the connection (for error completion)
*/
- private[http] final case class ResponseContext(requestMethod: HttpMethod,
- oneHundredContinueTrigger: Option[Promise[Unit]])
+ private[http] final case class ResponseContext(
+ requestMethod: HttpMethod,
+ oneHundredContinueTrigger: Option[Promise[Unit]])
private[http] object OneHundredContinueError
extends RuntimeException("Received error response for request with `Expect: 100-continue` header")
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala
index 0530010e32..278c71203f 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala
@@ -26,19 +26,19 @@ private[http] object ParserOutput {
sealed trait ErrorOutput extends MessageOutput
final case class RequestStart(
- method: HttpMethod,
- uri: Uri,
- protocol: HttpProtocol,
- headers: List[HttpHeader],
- createEntity: EntityCreator[RequestOutput, RequestEntity],
+ method: HttpMethod,
+ uri: Uri,
+ protocol: HttpProtocol,
+ headers: List[HttpHeader],
+ createEntity: EntityCreator[RequestOutput, RequestEntity],
expect100Continue: Boolean,
- closeRequested: Boolean) extends MessageStart with RequestOutput
+ closeRequested: Boolean) extends MessageStart with RequestOutput
final case class ResponseStart(
- statusCode: StatusCode,
- protocol: HttpProtocol,
- headers: List[HttpHeader],
- createEntity: EntityCreator[ResponseOutput, ResponseEntity],
+ statusCode: StatusCode,
+ protocol: HttpProtocol,
+ headers: List[HttpHeader],
+ createEntity: EntityCreator[ResponseOutput, ResponseEntity],
closeRequested: Boolean) extends MessageStart with ResponseOutput
case object MessageEnd extends MessageOutput
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala
index 449f7784dd..6fe8fb6d90 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala
@@ -51,8 +51,9 @@ package parsing {
/**
* INTERNAL API
*/
- private[parsing] class ParsingException(val status: StatusCode,
- val info: ErrorInfo) extends RuntimeException(info.formatPretty) {
+ private[parsing] class ParsingException(
+ val status: StatusCode,
+ val info: ErrorInfo) extends RuntimeException(info.formatPretty) {
def this(status: StatusCode, summary: String = "") =
this(status, ErrorInfo(if (summary.isEmpty) status.defaultMessage else summary))
def this(summary: String) =
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala
index 7f341060c4..4cfae5850d 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala
@@ -25,10 +25,11 @@ import scala.concurrent.forkjoin.ThreadLocalRandom
*/
private[http] object BodyPartRenderer {
- def streamed(boundary: String,
- nioCharset: Charset,
- partHeadersSizeHint: Int,
- log: LoggingAdapter): PushPullStage[Multipart.BodyPart, Source[ChunkStreamPart, Any]] =
+ def streamed(
+ boundary: String,
+ nioCharset: Charset,
+ partHeadersSizeHint: Int,
+ log: LoggingAdapter): PushPullStage[Multipart.BodyPart, Source[ChunkStreamPart, Any]] =
new PushPullStage[Multipart.BodyPart, Source[ChunkStreamPart, Any]] {
var firstBoundaryRendered = false
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala
index 18c6d610b9..ea797d9906 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala
@@ -22,9 +22,10 @@ import headers._
/**
* INTERNAL API
*/
-private[http] class HttpRequestRendererFactory(userAgentHeader: Option[headers.`User-Agent`],
- requestHeaderSizeHint: Int,
- log: LoggingAdapter) {
+private[http] class HttpRequestRendererFactory(
+ userAgentHeader: Option[headers.`User-Agent`],
+ requestHeaderSizeHint: Int,
+ log: LoggingAdapter) {
import HttpRequestRendererFactory.RequestRenderingOutput
def renderToSource(ctx: RequestRenderingContext): Source[ByteString, Any] = render(ctx).byteStream
@@ -175,6 +176,6 @@ private[http] object HttpRequestRendererFactory {
* if the future is completed with an error the connection is to be closed.
*/
private[http] final case class RequestRenderingContext(
- request: HttpRequest,
- hostHeader: Host,
+ request: HttpRequest,
+ hostHeader: Host,
sendEntityTrigger: Option[Future[NotUsed]] = None)
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala
index e3e46eb734..8a7ca48e72 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala
@@ -23,9 +23,10 @@ import headers._
/**
* INTERNAL API
*/
-private[http] class HttpResponseRendererFactory(serverHeader: Option[headers.Server],
- responseHeaderSizeHint: Int,
- log: LoggingAdapter) {
+private[http] class HttpResponseRendererFactory(
+ serverHeader: Option[headers.Server],
+ responseHeaderSizeHint: Int,
+ log: LoggingAdapter) {
private val renderDefaultServerHeader: Rendering ⇒ Unit =
serverHeader match {
@@ -46,7 +47,7 @@ private[http] class HttpResponseRendererFactory(serverHeader: Option[headers.Ser
val r = new ByteArrayRendering(48)
DateTime(now).renderRfc1123DateTimeString(r ~~ headers.Date) ~~ CrLf
cachedBytes = r.get
- cachedDateHeader = cachedSeconds -> cachedBytes
+ cachedDateHeader = cachedSeconds → cachedBytes
}
cachedBytes
}
@@ -275,10 +276,10 @@ private[http] class HttpResponseRendererFactory(serverHeader: Option[headers.Ser
* INTERNAL API
*/
private[http] final case class ResponseRenderingContext(
- response: HttpResponse,
- requestMethod: HttpMethod = HttpMethods.GET,
+ response: HttpResponse,
+ requestMethod: HttpMethod = HttpMethods.GET,
requestProtocol: HttpProtocol = HttpProtocols.`HTTP/1.1`,
- closeRequested: Boolean = false)
+ closeRequested: Boolean = false)
/** INTERNAL API */
private[http] sealed trait ResponseRenderingOutput
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala
index 6529d5f8a9..23895ada3a 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala
@@ -32,11 +32,10 @@ private object RenderSupport {
val defaultLastChunkBytes: ByteString = renderChunk(HttpEntity.LastChunk)
def CancelSecond[T, Mat](first: Source[T, Mat], second: Source[T, Any]): Source[T, Mat] = {
- Source.fromGraph(GraphDSL.create(first) { implicit b ⇒
- frst ⇒
- import GraphDSL.Implicits._
- second ~> Sink.cancelled
- SourceShape(frst.out)
+ Source.fromGraph(GraphDSL.create(first) { implicit b ⇒ frst ⇒
+ import GraphDSL.Implicits._
+ second ~> Sink.cancelled
+ SourceShape(frst.out)
})
}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
index 1a58b291cc..dc8f453c56 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala
@@ -156,7 +156,7 @@ private[http] object HttpServerBluePrint {
case StreamedEntityCreator(creator) ⇒ streamRequestEntity(creator)
}
- def streamRequestEntity(creator: (Source[ParserOutput.RequestOutput, NotUsed]) => RequestEntity): RequestEntity = {
+ def streamRequestEntity(creator: (Source[ParserOutput.RequestOutput, NotUsed]) ⇒ RequestEntity): RequestEntity = {
// stream incoming chunks into the request entity until we reach the end of it
// and then toggle back to "idle"
@@ -242,8 +242,9 @@ private[http] object HttpServerBluePrint {
val errorHandler: PartialFunction[Throwable, Throwable] = {
// idle timeouts should not result in errors in the log. See 19058.
- case timeout: HttpConnectionTimeoutException ⇒ log.debug(s"Closing HttpConnection due to timeout: ${timeout.getMessage}"); timeout
- case t ⇒ log.error(t, "Outgoing response stream error"); t
+ case timeout: HttpConnectionTimeoutException ⇒
+ log.debug(s"Closing HttpConnection due to timeout: ${timeout.getMessage}"); timeout
+ case t ⇒ log.error(t, "Outgoing response stream error"); t
}
Flow[ResponseRenderingContext]
@@ -252,7 +253,7 @@ private[http] object HttpServerBluePrint {
}
class RequestTimeoutSupport(initialTimeout: FiniteDuration)
- extends GraphStage[BidiShape[HttpRequest, HttpRequest, HttpResponse, HttpResponse]] {
+ extends GraphStage[BidiShape[HttpRequest, HttpRequest, HttpResponse, HttpResponse]] {
private val requestIn = Inlet[HttpRequest]("requestIn")
private val requestOut = Outlet[HttpRequest]("requestOut")
private val responseIn = Inlet[HttpResponse]("responseIn")
@@ -303,14 +304,15 @@ private[http] object HttpServerBluePrint {
}
}
- private class TimeoutSetup(val timeoutBase: Deadline,
- val scheduledTask: Cancellable,
- val timeout: Duration,
- val handler: HttpRequest ⇒ HttpResponse)
+ private class TimeoutSetup(
+ val timeoutBase: Deadline,
+ val scheduledTask: Cancellable,
+ val timeout: Duration,
+ val handler: HttpRequest ⇒ HttpResponse)
private class TimeoutAccessImpl(request: HttpRequest, initialTimeout: FiniteDuration, requestEnd: Future[Unit],
trigger: AsyncCallback[(TimeoutAccess, HttpResponse)], materializer: Materializer)
- extends AtomicReference[Future[TimeoutSetup]] with TimeoutAccess with (HttpRequest ⇒ HttpResponse) { self ⇒
+ extends AtomicReference[Future[TimeoutSetup]] with TimeoutAccess with (HttpRequest ⇒ HttpResponse) { self ⇒
import materializer.executionContext
set {
@@ -320,8 +322,8 @@ private[http] object HttpServerBluePrint {
override def apply(request: HttpRequest) =
//#default-request-timeout-httpresponse
HttpResponse(StatusCodes.ServiceUnavailable, entity = "The server was not able " +
- "to produce a timely response to your request.\r\nPlease try again in a short while!")
- //#
+ "to produce a timely response to your request.\r\nPlease try again in a short while!")
+ //#
def clear(): Unit = // best effort timeout cancellation
get.fast.foreach(setup ⇒ if (setup.scheduledTask ne null) setup.scheduledTask.cancel())
@@ -355,7 +357,7 @@ private[http] object HttpServerBluePrint {
}
class ControllerStage(settings: ServerSettings, log: LoggingAdapter)
- extends GraphStage[BidiShape[RequestOutput, RequestOutput, HttpResponse, ResponseRenderingContext]] {
+ extends GraphStage[BidiShape[RequestOutput, RequestOutput, HttpResponse, ResponseRenderingContext]] {
private val requestParsingIn = Inlet[RequestOutput]("requestParsingIn")
private val requestPrepOut = Outlet[RequestOutput]("requestPrepOut")
private val httpResponseIn = Inlet[HttpResponse]("httpResponseIn")
@@ -387,7 +389,7 @@ private[http] object HttpServerBluePrint {
messageEndPending = false
push(requestPrepOut, MessageEnd)
case MessageStartError(status, info) ⇒ finishWithIllegalRequestError(status, info)
- case x: EntityStreamError if messageEndPending && openRequests.isEmpty =>
+ case x: EntityStreamError if messageEndPending && openRequests.isEmpty ⇒
// client terminated the connection after receiving an early response to 100-continue
completeStage()
case x ⇒ push(requestPrepOut, x)
@@ -463,7 +465,8 @@ private[http] object HttpServerBluePrint {
})
def finishWithIllegalRequestError(status: StatusCode, info: ErrorInfo): Unit = {
- logParsingError(info withSummaryPrepended s"Illegal request, responding with status '$status'",
+ logParsingError(
+ info withSummaryPrepended s"Illegal request, responding with status '$status'",
log, settings.parserSettings.errorLoggingVerbosity)
val msg = if (settings.verboseErrorMessages) info.formatPretty else info.summary
emitErrorResponse(HttpResponse(status, entity = msg))
@@ -557,7 +560,7 @@ private[http] object HttpServerBluePrint {
One2OneBidiFlow[HttpRequest, HttpResponse](pipeliningLimit).reversed
private class ProtocolSwitchStage(settings: ServerSettings, log: LoggingAdapter)
- extends GraphStage[BidiShape[ResponseRenderingOutput, ByteString, SessionBytes, SessionBytes]] {
+ extends GraphStage[BidiShape[ResponseRenderingOutput, ByteString, SessionBytes, SessionBytes]] {
private val fromNet = Inlet[SessionBytes]("fromNet")
private val toNet = Outlet[ByteString]("toNet")
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala
index 6282cd8c87..6146056cff 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala
@@ -41,23 +41,25 @@ private[http] final case class FrameData(data: ByteString, lastPart: Boolean) ex
}
/** Model of the frame header */
-private[http] final case class FrameHeader(opcode: Protocol.Opcode,
- mask: Option[Int],
- length: Long,
- fin: Boolean,
- rsv1: Boolean = false,
- rsv2: Boolean = false,
- rsv3: Boolean = false)
+private[http] final case class FrameHeader(
+ opcode: Protocol.Opcode,
+ mask: Option[Int],
+ length: Long,
+ fin: Boolean,
+ rsv1: Boolean = false,
+ rsv2: Boolean = false,
+ rsv3: Boolean = false)
private[http] object FrameEvent {
- def empty(opcode: Protocol.Opcode,
- fin: Boolean,
- rsv1: Boolean = false,
- rsv2: Boolean = false,
- rsv3: Boolean = false): FrameStart =
+ def empty(
+ opcode: Protocol.Opcode,
+ fin: Boolean,
+ rsv1: Boolean = false,
+ rsv2: Boolean = false,
+ rsv3: Boolean = false): FrameStart =
fullFrame(opcode, None, ByteString.empty, fin, rsv1, rsv2, rsv3)
def fullFrame(opcode: Protocol.Opcode, mask: Option[Int], data: ByteString,
- fin: Boolean,
+ fin: Boolean,
rsv1: Boolean = false,
rsv2: Boolean = false,
rsv3: Boolean = false): FrameStart =
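
Given the case-class signature reformatted above, a close-frame header could be built as in the following hypothetical sketch (Protocol.Opcode.Close from the same package is assumed):

    // unmasked, empty-payload close frame; rsv1/rsv2/rsv3 keep their false defaults
    val closeHeader = FrameHeader(
      opcode = Protocol.Opcode.Close,
      mask = None,
      length = 0L,
      fin = true)
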
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEventParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEventParser.scala
index 5f02ea22d3..3fea4f3ab8 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEventParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEventParser.scala
@@ -73,7 +73,8 @@ private[http] object FrameEventParser extends ByteStringParser[FrameEvent] {
def isFlagSet(mask: Int): Boolean = (flags & mask) != 0
val header =
- FrameHeader(Opcode.forCode(op.toByte),
+ FrameHeader(
+ Opcode.forCode(op.toByte),
mask,
length,
fin = isFlagSet(FIN_MASK),
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala
index 885392262f..c207b7c27e 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala
@@ -92,7 +92,8 @@ private[http] object Handshake {
def requestedProtocols: Seq[String] = clientSupportedSubprotocols
def handle(handler: Either[Graph[FlowShape[FrameEvent, FrameEvent], Any], Graph[FlowShape[Message, Message], Any]], subprotocol: Option[String]): HttpResponse = {
- require(subprotocol.forall(chosen ⇒ clientSupportedSubprotocols.contains(chosen)),
+ require(
+ subprotocol.forall(chosen ⇒ clientSupportedSubprotocols.contains(chosen)),
s"Tried to choose invalid subprotocol '$subprotocol' which wasn't offered by the client: [${requestedProtocols.mkString(", ")}]")
buildResponse(key.get, handler, subprotocol)
}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala
index 3ab7d4eb71..1abe009a30 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala
@@ -26,10 +26,11 @@ private[http] object WebSocket {
/**
* A stack of all the higher WS layers between raw frames and the user API.
*/
- def stack(serverSide: Boolean,
- maskingRandomFactory: () ⇒ Random,
- closeTimeout: FiniteDuration = 3.seconds,
- log: LoggingAdapter): BidiFlow[FrameEvent, Message, Message, FrameEvent, NotUsed] =
+ def stack(
+ serverSide: Boolean,
+ maskingRandomFactory: () ⇒ Random,
+ closeTimeout: FiniteDuration = 3.seconds,
+ log: LoggingAdapter): BidiFlow[FrameEvent, Message, Message, FrameEvent, NotUsed] =
masking(serverSide, maskingRandomFactory) atop
frameHandling(serverSide, closeTimeout, log) atop
messageAPI(serverSide, closeTimeout)
@@ -50,9 +51,10 @@ private[http] object WebSocket {
* The layer that implements all low-level frame handling, like handling control frames, collecting messages
* from frames, decoding text messages, close handling, etc.
*/
- def frameHandling(serverSide: Boolean = true,
- closeTimeout: FiniteDuration,
- log: LoggingAdapter): BidiFlow[FrameEventOrError, FrameHandler.Output, FrameOutHandler.Input, FrameStart, NotUsed] =
+ def frameHandling(
+ serverSide: Boolean = true,
+ closeTimeout: FiniteDuration,
+ log: LoggingAdapter): BidiFlow[FrameEventOrError, FrameHandler.Output, FrameOutHandler.Input, FrameStart, NotUsed] =
BidiFlow.fromFlows(
FrameHandler.create(server = serverSide),
FrameOutHandler.create(serverSide, closeTimeout, log))
@@ -67,7 +69,7 @@ private[http] object WebSocket {
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
var inMessage = false
- override def onPush():Unit = grab(in) match {
+ override def onPush(): Unit = grab(in) match {
case PeerClosed(code, reason) ⇒
if (code.exists(Protocol.CloseCodes.isError)) failStage(new PeerClosedConnectionException(code.get, reason))
          else if (inMessage) failStage(new ProtocolException(s"Truncated message, peer closed connection in the middle of a message."))
@@ -77,8 +79,8 @@ private[http] object WebSocket {
else failStage(new IllegalStateException("Regular close from FrameHandler is unexpected"))
case x: MessageDataPart ⇒
inMessage = !x.last
- push(out,x)
- case x ⇒ push(out,x)
+ push(out, x)
+ case x ⇒ push(out, x)
}
override def onPull(): Unit = pull(in)
setHandlers(in, out, this)
@@ -88,8 +90,9 @@ private[http] object WebSocket {
/**
* The layer that provides the high-level user facing API on top of frame handling.
*/
- def messageAPI(serverSide: Boolean,
- closeTimeout: FiniteDuration): BidiFlow[FrameHandler.Output, Message, Message, FrameOutHandler.Input, NotUsed] = {
+ def messageAPI(
+ serverSide: Boolean,
+ closeTimeout: FiniteDuration): BidiFlow[FrameHandler.Output, Message, Message, FrameOutHandler.Input, NotUsed] = {
/* Collects user-level API messages from MessageDataParts */
val collectMessage: Flow[MessageDataPart, Message, NotUsed] =
Flow[MessageDataPart]
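
stack above layers three BidiFlows with atop, which plugs each layer's outbound side into the next layer's inbound side and keeps the left materialized value. A toy sketch of the combinator with assumed types and transforms:

    import akka.NotUsed
    import akka.stream.scaladsl.{ BidiFlow, Flow }

    val codec: BidiFlow[Int, Int, String, String, NotUsed] =
      BidiFlow.fromFlows(Flow[Int].map(_ + 1), Flow[String].map(_.trim))
    val passthrough: BidiFlow[Int, Int, String, String, NotUsed] =
      BidiFlow.fromFlows(Flow[Int], Flow[String])
    // Int travels down through codec then passthrough; String travels back up the stack
    val stacked: BidiFlow[Int, Int, String, String, NotUsed] = codec atop passthrough
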
diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala
index 9986b88eb9..001932af61 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala
@@ -33,9 +33,10 @@ object WebSocketClientBlueprint {
/**
* Returns a WebSocketClientLayer that can be materialized once.
*/
- def apply(request: WebSocketRequest,
- settings: ClientConnectionSettings,
- log: LoggingAdapter): Http.WebSocketClientLayer =
+ def apply(
+ request: WebSocketRequest,
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter): Http.WebSocketClientLayer =
(simpleTls.atopMat(handshake(request, settings, log))(Keep.right) atop
WebSocket.framing atop
WebSocket.stack(serverSide = false, maskingRandomFactory = settings.websocketRandomFactory, log = log)).reversed
@@ -44,9 +45,10 @@ object WebSocketClientBlueprint {
* A bidi flow that injects and inspects the WS handshake and then goes out of the way. This BidiFlow
* can only be materialized once.
*/
- def handshake(request: WebSocketRequest,
- settings: ClientConnectionSettings,
- log: LoggingAdapter): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[WebSocketUpgradeResponse]] = {
+ def handshake(
+ request: WebSocketRequest,
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[WebSocketUpgradeResponse]] = {
import request._
val result = Promise[WebSocketUpgradeResponse]()
diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala
index d000081242..1176c33133 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala
@@ -180,8 +180,8 @@ private[parser] trait CommonRules { this: Parser with StringBuilding ⇒
def `challenge-or-credentials`: Rule2[String, Seq[(String, String)]] = rule {
`auth-scheme` ~ (
- oneOrMore(`auth-param` ~> (_ -> _)).separatedBy(listSep)
- | `token68` ~> (x ⇒ ("" -> x) :: Nil)
+ oneOrMore(`auth-param` ~> (_ → _)).separatedBy(listSep)
+ | `token68` ~> (x ⇒ ("" → x) :: Nil)
| push(Nil))
}
@@ -397,7 +397,7 @@ private[parser] trait CommonRules { this: Parser with StringBuilding ⇒
token ~ zeroOrMore(ws(';') ~ `transfer-parameter`) ~> (_.toMap) ~> (TransferEncodings.Extension(_, _))
}
- def `transfer-parameter` = rule { token ~ ws('=') ~ word ~> (_ -> _) }
+ def `transfer-parameter` = rule { token ~ ws('=') ~ word ~> (_ → _) }
// ******************************************************************************************
// helpers
diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala
index 41df467a4b..6c11098f87 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala
@@ -22,7 +22,7 @@ private[parser] trait ContentDispositionHeader { this: Parser with CommonRules w
def `disp-ext-type` = rule { token }
- def `disposition-parm` = rule { (`filename-parm` | `disp-ext-parm`) ~> (_ -> _) }
+ def `disposition-parm` = rule { (`filename-parm` | `disp-ext-parm`) ~> (_ → _) }
def `filename-parm` = rule(
ignoreCase("filename") ~ OWS ~ ws('=') ~ push("filename") ~ word
diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala
index b54990c8cc..4df602a98c 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala
@@ -15,11 +15,12 @@ private[parser] trait ContentTypeHeader { this: Parser with CommonRules with Com
`media-type` ~ EOI ~> ((main, sub, params) ⇒ headers.`Content-Type`(contentType(main, sub, params)))
}
- @tailrec private def contentType(main: String,
- sub: String,
- params: Seq[(String, String)],
- charset: Option[HttpCharset] = None,
- builder: StringMapBuilder = null): ContentType =
+ @tailrec private def contentType(
+ main: String,
+ sub: String,
+ params: Seq[(String, String)],
+ charset: Option[HttpCharset] = None,
+ builder: StringMapBuilder = null): ContentType =
params match {
case Nil ⇒
val parameters = if (builder eq null) Map.empty[String, String] else builder.result()
diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala
index 9c77e9fba9..1d95e2f0b6 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala
@@ -19,7 +19,7 @@ import akka.http.scaladsl.model._
*/
private[http] class HeaderParser(
val input: ParserInput,
- settings: HeaderParser.Settings = HeaderParser.DefaultSettings)
+ settings: HeaderParser.Settings = HeaderParser.DefaultSettings)
extends Parser with DynamicRuleHandler[HeaderParser, HttpHeader :: HNil]
with CommonRules
with AcceptCharsetHeader
@@ -99,7 +99,8 @@ private[http] object HeaderParser {
dispatch(parser, headerName) match {
case r @ Right(_) if parser.cursor == v.length ⇒ r
case r @ Right(_) ⇒
- Left(ErrorInfo("Header parsing error",
+ Left(ErrorInfo(
+ "Header parsing error",
s"Rule for $headerName accepted trailing garbage. Is the parser missing a trailing EOI?"))
case Left(e) ⇒ Left(e.copy(summary = e.summary.filterNot(_ == EOI), detail = e.detail.filterNot(_ == EOI)))
}
@@ -169,9 +170,10 @@ private[http] object HeaderParser {
def cookieParsingMode: ParserSettings.CookieParsingMode
def customMediaTypes: MediaTypes.FindCustom
}
- def Settings(uriParsingMode: Uri.ParsingMode = Uri.ParsingMode.Relaxed,
- cookieParsingMode: ParserSettings.CookieParsingMode = ParserSettings.CookieParsingMode.RFC6265,
- customMediaTypes: MediaTypes.FindCustom = ConstantFun.scalaAnyTwoToNone): Settings = {
+ def Settings(
+ uriParsingMode: Uri.ParsingMode = Uri.ParsingMode.Relaxed,
+ cookieParsingMode: ParserSettings.CookieParsingMode = ParserSettings.CookieParsingMode.RFC6265,
+ customMediaTypes: MediaTypes.FindCustom = ConstantFun.scalaAnyTwoToNone): Settings = {
val _uriParsingMode = uriParsingMode
val _cookieParsingMode = cookieParsingMode
val _customMediaTypes = customMediaTypes
diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala
index 1f8e4d40b1..fbe942717e 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala
@@ -18,13 +18,13 @@ import scala.concurrent.duration.{ Duration, FiniteDuration }
/** INTERNAL API */
private[akka] final case class ClientConnectionSettingsImpl(
- userAgentHeader: Option[`User-Agent`],
- connectingTimeout: FiniteDuration,
- idleTimeout: Duration,
- requestHeaderSizeHint: Int,
+ userAgentHeader: Option[`User-Agent`],
+ connectingTimeout: FiniteDuration,
+ idleTimeout: Duration,
+ requestHeaderSizeHint: Int,
websocketRandomFactory: () ⇒ Random,
- socketOptions: immutable.Seq[SocketOption],
- parserSettings: ParserSettings)
+ socketOptions: immutable.Seq[SocketOption],
+ parserSettings: ParserSettings)
extends akka.http.scaladsl.settings.ClientConnectionSettings {
require(connectingTimeout >= Duration.Zero, "connectingTimeout must be >= 0")
diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala
index dfa0a63f0c..dc4411093f 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala
@@ -12,11 +12,11 @@ import scala.concurrent.duration.Duration
/** INTERNAL API */
private[akka] final case class ConnectionPoolSettingsImpl(
- val maxConnections: Int,
- val maxRetries: Int,
- val maxOpenRequests: Int,
- val pipeliningLimit: Int,
- val idleTimeout: Duration,
+ val maxConnections: Int,
+ val maxRetries: Int,
+ val maxOpenRequests: Int,
+ val pipeliningLimit: Int,
+ val idleTimeout: Duration,
val connectionSettings: ClientConnectionSettings)
extends ConnectionPoolSettings {
diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala
index 6eed86dbee..a7ad0c8c19 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala
@@ -10,6 +10,6 @@ import akka.http.scaladsl.settings.ConnectionPoolSettings
/** INTERNAL API */
private[akka] final case class ConnectionPoolSetup(
- settings: ConnectionPoolSettings,
- connectionContext: ConnectionContext = ConnectionContext.noEncryption(),
- log: LoggingAdapter)
\ No newline at end of file
+ settings: ConnectionPoolSettings,
+ connectionContext: ConnectionContext = ConnectionContext.noEncryption(),
+ log: LoggingAdapter)
\ No newline at end of file
diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala
index 021f774480..4214065af4 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala
@@ -14,24 +14,24 @@ import akka.http.impl.util._
/** INTERNAL API */
private[akka] final case class ParserSettingsImpl(
- maxUriLength: Int,
- maxMethodLength: Int,
- maxResponseReasonLength: Int,
- maxHeaderNameLength: Int,
- maxHeaderValueLength: Int,
- maxHeaderCount: Int,
- maxContentLength: Long,
- maxChunkExtLength: Int,
- maxChunkSize: Int,
- uriParsingMode: Uri.ParsingMode,
- cookieParsingMode: CookieParsingMode,
- illegalHeaderWarnings: Boolean,
- errorLoggingVerbosity: ParserSettings.ErrorLoggingVerbosity,
- headerValueCacheLimits: Map[String, Int],
+ maxUriLength: Int,
+ maxMethodLength: Int,
+ maxResponseReasonLength: Int,
+ maxHeaderNameLength: Int,
+ maxHeaderValueLength: Int,
+ maxHeaderCount: Int,
+ maxContentLength: Long,
+ maxChunkExtLength: Int,
+ maxChunkSize: Int,
+ uriParsingMode: Uri.ParsingMode,
+ cookieParsingMode: CookieParsingMode,
+ illegalHeaderWarnings: Boolean,
+ errorLoggingVerbosity: ParserSettings.ErrorLoggingVerbosity,
+ headerValueCacheLimits: Map[String, Int],
includeTlsSessionInfoHeader: Boolean,
- customMethods: String ⇒ Option[HttpMethod],
- customStatusCodes: Int ⇒ Option[StatusCode],
- customMediaTypes: MediaTypes.FindCustom)
+ customMethods: String ⇒ Option[HttpMethod],
+ customStatusCodes: Int ⇒ Option[StatusCode],
+ customMediaTypes: MediaTypes.FindCustom)
extends akka.http.scaladsl.settings.ParserSettings {
require(maxUriLength > 0, "max-uri-length must be > 0")
@@ -76,7 +76,7 @@ object ParserSettingsImpl extends SettingsCompanion[ParserSettingsImpl]("akka.ht
CookieParsingMode(c getString "cookie-parsing-mode"),
c getBoolean "illegal-header-warnings",
ErrorLoggingVerbosity(c getString "error-logging-verbosity"),
- cacheConfig.entrySet.asScala.map(kvp ⇒ kvp.getKey -> cacheConfig.getInt(kvp.getKey))(collection.breakOut),
+ cacheConfig.entrySet.asScala.map(kvp ⇒ kvp.getKey → cacheConfig.getInt(kvp.getKey))(collection.breakOut),
c getBoolean "tls-session-info-header",
noCustomMethods,
noCustomStatusCodes,
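The companion's `fromSubConfig` above reads each constructor parameter from the `akka.http.parsing` section, so the limits can be tuned through ordinary config loading; a minimal sketch:

import akka.actor.ActorSystem
import akka.http.scaladsl.settings.ParserSettings
import com.typesafe.config.ConfigFactory

// the keys mirror the constructor parameters of ParserSettingsImpl above
val config = ConfigFactory.parseString("""
  akka.http.parsing {
    max-uri-length = 4k
    max-header-count = 96
  }
""").withFallback(ConfigFactory.load())
implicit val system = ActorSystem("parser-demo", config)
val parserSettings = ParserSettings(system)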
diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala
index de55b0c675..8abd0ea50a 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala
@@ -9,13 +9,13 @@ import com.typesafe.config.Config
/** INTERNAL API */
final case class RoutingSettingsImpl(
- verboseErrorMessages: Boolean,
- fileGetConditional: Boolean,
- renderVanityFooter: Boolean,
- rangeCountLimit: Int,
+ verboseErrorMessages: Boolean,
+ fileGetConditional: Boolean,
+ renderVanityFooter: Boolean,
+ rangeCountLimit: Int,
rangeCoalescingThreshold: Long,
- decodeMaxBytesPerChunk: Int,
- fileIODispatcher: String) extends akka.http.scaladsl.settings.RoutingSettings {
+ decodeMaxBytesPerChunk: Int,
+ fileIODispatcher: String) extends akka.http.scaladsl.settings.RoutingSettings {
override def productPrefix = "RoutingSettings"
}
diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala
index fb74c3f72b..32ee36b9ae 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala
@@ -25,20 +25,20 @@ import akka.http.scaladsl.model.headers.{ Host, Server }
/** INTERNAL API */
private[akka] final case class ServerSettingsImpl(
- serverHeader: Option[Server],
- timeouts: ServerSettings.Timeouts,
- maxConnections: Int,
- pipeliningLimit: Int,
- remoteAddressHeader: Boolean,
- rawRequestUriHeader: Boolean,
+ serverHeader: Option[Server],
+ timeouts: ServerSettings.Timeouts,
+ maxConnections: Int,
+ pipeliningLimit: Int,
+ remoteAddressHeader: Boolean,
+ rawRequestUriHeader: Boolean,
transparentHeadRequests: Boolean,
- verboseErrorMessages: Boolean,
- responseHeaderSizeHint: Int,
- backlog: Int,
- socketOptions: immutable.Seq[SocketOption],
- defaultHostHeader: Host,
- websocketRandomFactory: () ⇒ Random,
- parserSettings: ParserSettings) extends ServerSettings {
+ verboseErrorMessages: Boolean,
+ responseHeaderSizeHint: Int,
+ backlog: Int,
+ socketOptions: immutable.Seq[SocketOption],
+ defaultHostHeader: Host,
+ websocketRandomFactory: () ⇒ Random,
+ parserSettings: ParserSettings) extends ServerSettings {
require(0 < maxConnections, "max-connections must be > 0")
require(0 < pipeliningLimit && pipeliningLimit <= 1024, "pipelining-limit must be > 0 and <= 1024")
@@ -53,9 +53,9 @@ object ServerSettingsImpl extends SettingsCompanion[ServerSettingsImpl]("akka.ht
/** INTERNAL API */
final case class Timeouts(
- idleTimeout: Duration,
+ idleTimeout: Duration,
requestTimeout: Duration,
- bindTimeout: FiniteDuration) extends ServerSettings.Timeouts {
+ bindTimeout: FiniteDuration) extends ServerSettings.Timeouts {
require(idleTimeout > Duration.Zero, "idleTimeout must be infinite or > 0")
require(requestTimeout > Duration.Zero, "requestTimeout must be infinite or > 0")
require(bindTimeout > Duration.Zero, "bindTimeout must be > 0")
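The three requires spell out the Timeouts contract: idle and request timeouts may be `infinite` or positive, while the bind timeout must be finite and positive. A config-override sketch, again assuming the companion's `apply(configOverrides: String)` factory:

import akka.http.scaladsl.settings.ServerSettings

val serverSettings: ServerSettings = ServerSettings(
  """akka.http.server {
    |  idle-timeout = infinite
    |  request-timeout = 20 s
    |  bind-timeout = 1 s
    |}""".stripMargin)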
diff --git a/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala b/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala
index e125dce8a1..df8b5f2b01 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala
@@ -54,6 +54,6 @@ private[http] object SettingsCompanion {
val localHostName =
try new InetSocketAddress(InetAddress.getLocalHost, 80).getHostString
catch { case NonFatal(_) ⇒ "" }
- ConfigFactory.parseMap(Map("akka.http.hostname" -> localHostName).asJava)
+ ConfigFactory.parseMap(Map("akka.http.hostname" → localHostName).asJava)
}
}
\ No newline at end of file
diff --git a/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala b/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala
index aabfaa4d8b..b2cea22a33 100644
--- a/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala
+++ b/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala
@@ -70,7 +70,7 @@ private[http] object StreamUtils {
setHandlers(in, out, this)
}
}
- source.via(transformer) -> promise.future
+ source.via(transformer) → promise.future
}
def sliceBytesTransformer(start: Long, length: Long): Flow[ByteString, ByteString, NotUsed] = {
@@ -163,7 +163,7 @@ private[http] object StreamUtils {
/** A copy of PublisherSink that allows access to the publisher through the cell but can only be materialized once */
private class OneTimePublisherSink[In](attributes: Attributes, shape: SinkShape[In], cell: OneTimeWriteCell[Publisher[In]])
- extends PublisherSink[In](attributes, shape) {
+ extends PublisherSink[In](attributes, shape) {
override def create(context: MaterializationContext): (AnyRef, Publisher[In]) = {
val results = super.create(context)
cell.set(results._2)
@@ -177,7 +177,7 @@ private[http] object StreamUtils {
}
/** A copy of SubscriberSource that allows access to the subscriber through the cell but can only be materialized once */
private class OneTimeSubscriberSource[Out](val attributes: Attributes, shape: SourceShape[Out], cell: OneTimeWriteCell[Subscriber[Out]])
- extends SourceModule[Out, Subscriber[Out]](shape) {
+ extends SourceModule[Out, Subscriber[Out]](shape) {
override def create(context: MaterializationContext): (Publisher[Out], Subscriber[Out]) = {
val processor = new Processor[Out, Out] {
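`captureTermination` (first hunk above) pairs a source with a future that completes when the stream terminates. Outside of this internal helper, the public `watchTermination` combinator yields the same shape; a minimal sketch:

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.util.ByteString
import scala.concurrent.Future

implicit val system = ActorSystem("watch-termination-demo")
implicit val materializer = ActorMaterializer()

// `to` keeps the left (source-side) materialized value: the termination future
val whenTerminated: Future[Done] =
  Source(List(ByteString("a"), ByteString("b")))
    .watchTermination()(Keep.right)
    .to(Sink.ignore)
    .run()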
diff --git a/akka-http-core/src/main/scala/akka/http/javadsl/ConnectionContext.scala b/akka-http-core/src/main/scala/akka/http/javadsl/ConnectionContext.scala
index 73f089ed18..ff76145bf4 100644
--- a/akka-http-core/src/main/scala/akka/http/javadsl/ConnectionContext.scala
+++ b/akka-http-core/src/main/scala/akka/http/javadsl/ConnectionContext.scala
@@ -22,12 +22,13 @@ object ConnectionContext {
scaladsl.ConnectionContext.https(sslContext)
/** Used to serve HTTPS traffic. */
- def https(sslContext: SSLContext,
- sslConfig: Optional[AkkaSSLConfig],
- enabledCipherSuites: Optional[JCollection[String]],
- enabledProtocols: Optional[JCollection[String]],
- clientAuth: Optional[TLSClientAuth],
- sslParameters: Optional[SSLParameters]) =
+ def https(
+ sslContext: SSLContext,
+ sslConfig: Optional[AkkaSSLConfig],
+ enabledCipherSuites: Optional[JCollection[String]],
+ enabledProtocols: Optional[JCollection[String]],
+ clientAuth: Optional[TLSClientAuth],
+ sslParameters: Optional[SSLParameters]) =
scaladsl.ConnectionContext.https(
sslContext,
OptionConverters.toScala(sslConfig),
@@ -39,11 +40,12 @@ object ConnectionContext {
/** Used to serve HTTPS traffic. */
// for binary-compatibility, since 2.4.7
- def https(sslContext: SSLContext,
- enabledCipherSuites: Optional[JCollection[String]],
- enabledProtocols: Optional[JCollection[String]],
- clientAuth: Optional[TLSClientAuth],
- sslParameters: Optional[SSLParameters]) =
+ def https(
+ sslContext: SSLContext,
+ enabledCipherSuites: Optional[JCollection[String]],
+ enabledProtocols: Optional[JCollection[String]],
+ clientAuth: Optional[TLSClientAuth],
+ sslParameters: Optional[SSLParameters]) =
scaladsl.ConnectionContext.https(
sslContext,
OptionConverters.toScala(enabledCipherSuites).map(Util.immutableSeq(_)),
diff --git a/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala b/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala
index 5575137ad4..6bb2e1511c 100644
--- a/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala
+++ b/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala
@@ -55,8 +55,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* Constructs a server layer stage using the given [[akka.http.javadsl.settings.ServerSettings]]. The returned [[BidiFlow]] isn't reusable and
* can only be materialized once.
*/
- def serverLayer(settings: ServerSettings,
- materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] =
+ def serverLayer(
+ settings: ServerSettings,
+ materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] =
adaptServerLayer(delegate.serverLayer(settings.asScala)(materializer))
/**
@@ -64,9 +65,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* can only be materialized once. The `remoteAddress`, if provided, will be added as a header to each [[HttpRequest]]
* this layer produces if the `akka.http.server.remote-address-header` configuration option is enabled.
*/
- def serverLayer(settings: ServerSettings,
- remoteAddress: Optional[InetSocketAddress],
- materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] =
+ def serverLayer(
+ settings: ServerSettings,
+ remoteAddress: Optional[InetSocketAddress],
+ materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] =
adaptServerLayer(delegate.serverLayer(settings.asScala, remoteAddress.asScala)(materializer))
/**
@@ -74,10 +76,11 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* can only be materialized once. The `remoteAddress`, if provided, will be added as a header to each [[HttpRequest]]
* this layer produces if the `akka.http.server.remote-address-header` configuration option is enabled.
*/
- def serverLayer(settings: ServerSettings,
- remoteAddress: Optional[InetSocketAddress],
- log: LoggingAdapter,
- materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] =
+ def serverLayer(
+ settings: ServerSettings,
+ remoteAddress: Optional[InetSocketAddress],
+ log: LoggingAdapter,
+ materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] =
adaptServerLayer(delegate.serverLayer(settings.asScala, remoteAddress.asScala, log)(materializer))
/**
@@ -117,9 +120,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bind(connect: ConnectHttp,
- settings: ServerSettings,
- materializer: Materializer): Source[IncomingConnection, CompletionStage[ServerBinding]] = {
+ def bind(
+ connect: ConnectHttp,
+ settings: ServerSettings,
+ materializer: Materializer): Source[IncomingConnection, CompletionStage[ServerBinding]] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
new Source(delegate.bind(connect.host, connect.port, settings = settings.asScala, connectionContext = connectionContext)(materializer)
.map(new IncomingConnection(_))
@@ -141,10 +145,11 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bind(connect: ConnectHttp,
- settings: ServerSettings,
- log: LoggingAdapter,
- materializer: Materializer): Source[IncomingConnection, CompletionStage[ServerBinding]] = {
+ def bind(
+ connect: ConnectHttp,
+ settings: ServerSettings,
+ log: LoggingAdapter,
+ materializer: Materializer): Source[IncomingConnection, CompletionStage[ServerBinding]] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
new Source(delegate.bind(connect.host, connect.port, connectionContext, settings.asScala, log)(materializer)
.map(new IncomingConnection(_))
@@ -161,11 +166,13 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, _],
- connect: ConnectHttp,
- materializer: Materializer): CompletionStage[ServerBinding] = {
+ def bindAndHandle(
+ handler: Flow[HttpRequest, HttpResponse, _],
+ connect: ConnectHttp,
+ materializer: Materializer): CompletionStage[ServerBinding] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
- delegate.bindAndHandle(handler.asInstanceOf[Flow[sm.HttpRequest, sm.HttpResponse, _]].asScala,
+ delegate.bindAndHandle(
+ handler.asInstanceOf[Flow[sm.HttpRequest, sm.HttpResponse, _]].asScala,
connect.host, connect.port, connectionContext)(materializer)
.map(new ServerBinding(_))(ec).toJava
}
@@ -180,13 +187,15 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, _],
- connect: ConnectHttp,
- settings: ServerSettings,
- log: LoggingAdapter,
- materializer: Materializer): CompletionStage[ServerBinding] = {
+ def bindAndHandle(
+ handler: Flow[HttpRequest, HttpResponse, _],
+ connect: ConnectHttp,
+ settings: ServerSettings,
+ log: LoggingAdapter,
+ materializer: Materializer): CompletionStage[ServerBinding] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
- delegate.bindAndHandle(handler.asInstanceOf[Flow[sm.HttpRequest, sm.HttpResponse, _]].asScala,
+ delegate.bindAndHandle(
+ handler.asInstanceOf[Flow[sm.HttpRequest, sm.HttpResponse, _]].asScala,
connect.host, connect.port, connectionContext, settings.asScala, log)(materializer)
.map(new ServerBinding(_))(ec).toJava
}
@@ -201,9 +210,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bindAndHandleSync(handler: Function[HttpRequest, HttpResponse],
- connect: ConnectHttp,
- materializer: Materializer): CompletionStage[ServerBinding] = {
+ def bindAndHandleSync(
+ handler: Function[HttpRequest, HttpResponse],
+ connect: ConnectHttp,
+ materializer: Materializer): CompletionStage[ServerBinding] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
delegate.bindAndHandleSync(handler.apply(_).asScala, connect.host, connect.port, connectionContext)(materializer)
.map(new ServerBinding(_))(ec).toJava
@@ -219,13 +229,15 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bindAndHandleSync(handler: Function[HttpRequest, HttpResponse],
- connect: ConnectHttp,
- settings: ServerSettings,
- log: LoggingAdapter,
- materializer: Materializer): CompletionStage[ServerBinding] = {
+ def bindAndHandleSync(
+ handler: Function[HttpRequest, HttpResponse],
+ connect: ConnectHttp,
+ settings: ServerSettings,
+ log: LoggingAdapter,
+ materializer: Materializer): CompletionStage[ServerBinding] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
- delegate.bindAndHandleSync(handler.apply(_).asScala,
+ delegate.bindAndHandleSync(
+ handler.apply(_).asScala,
connect.host, connect.port, connectionContext, settings.asScala, log)(materializer)
.map(new ServerBinding(_))(ec).toJava
}
@@ -240,9 +252,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bindAndHandleAsync(handler: Function[HttpRequest, CompletionStage[HttpResponse]],
- connect: ConnectHttp,
- materializer: Materializer): CompletionStage[ServerBinding] = {
+ def bindAndHandleAsync(
+ handler: Function[HttpRequest, CompletionStage[HttpResponse]],
+ connect: ConnectHttp,
+ materializer: Materializer): CompletionStage[ServerBinding] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
delegate.bindAndHandleAsync(handler.apply(_).toScala, connect.host, connect.port, connectionContext)(materializer)
.map(new ServerBinding(_))(ec).toJava
@@ -258,13 +271,15 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]],
* or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]].
*/
- def bindAndHandleAsync(handler: Function[HttpRequest, CompletionStage[HttpResponse]],
- connect: ConnectHttp,
- settings: ServerSettings,
- parallelism: Int, log: LoggingAdapter,
- materializer: Materializer): CompletionStage[ServerBinding] = {
+ def bindAndHandleAsync(
+ handler: Function[HttpRequest, CompletionStage[HttpResponse]],
+ connect: ConnectHttp,
+ settings: ServerSettings,
+ parallelism: Int, log: LoggingAdapter,
+ materializer: Materializer): CompletionStage[ServerBinding] = {
val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala
- delegate.bindAndHandleAsync(handler.apply(_).toScala,
+ delegate.bindAndHandleAsync(
+ handler.apply(_).toScala,
connect.host, connect.port, connectionContext, settings.asScala, parallelism, log)(materializer)
.map(new ServerBinding(_))(ec).toJava
}
@@ -278,16 +293,18 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
/**
* Constructs a client layer stage using the given [[akka.http.javadsl.settings.ClientConnectionSettings]].
*/
- def clientLayer(hostHeader: headers.Host,
- settings: ClientConnectionSettings): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] =
+ def clientLayer(
+ hostHeader: headers.Host,
+ settings: ClientConnectionSettings): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] =
adaptClientLayer(delegate.clientLayer(JavaMapping.toScala(hostHeader), settings.asScala))
/**
* Constructs a client layer stage using the given [[ClientConnectionSettings]].
*/
- def clientLayer(hostHeader: headers.Host,
- settings: ClientConnectionSettings,
- log: LoggingAdapter): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] =
+ def clientLayer(
+ hostHeader: headers.Host,
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] =
adaptClientLayer(delegate.clientLayer(JavaMapping.toScala(hostHeader), settings.asScala, log))
/**
@@ -315,10 +332,11 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* Creates a [[Flow]] representing a prospective HTTP client connection to the given endpoint.
* Every materialization of the produced flow will attempt to establish a new outgoing connection.
*/
- def outgoingConnection(to: ConnectHttp,
- localAddress: Optional[InetSocketAddress],
- settings: ClientConnectionSettings,
- log: LoggingAdapter): Flow[HttpRequest, HttpResponse, CompletionStage[OutgoingConnection]] =
+ def outgoingConnection(
+ to: ConnectHttp,
+ localAddress: Optional[InetSocketAddress],
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter): Flow[HttpRequest, HttpResponse, CompletionStage[OutgoingConnection]] =
adaptOutgoingFlow {
if (to.isHttps)
delegate.outgoingConnectionHttps(to.host, to.port, to.effectiveConnectionContext(defaultClientHttpsContext).asInstanceOf[HttpsConnectionContext].asScala, localAddress.asScala, settings.asScala, log)
@@ -365,9 +383,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* The given [[ConnectionContext]] will be used for encryption on the connection.
*/
- def newHostConnectionPool[T](to: ConnectHttp,
- settings: ConnectionPoolSettings,
- log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] =
+ def newHostConnectionPool[T](
+ to: ConnectHttp,
+ settings: ConnectionPoolSettings,
+ log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] =
adaptTupleFlow {
to.effectiveHttpsConnectionContext(defaultClientHttpsContext) match {
case https: HttpsConnectionContext ⇒
@@ -424,9 +443,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* The given [[ConnectionContext]] will be used for encryption on the connection.
*/
- def cachedHostConnectionPool[T](to: ConnectHttp,
- settings: ConnectionPoolSettings,
- log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] =
+ def cachedHostConnectionPool[T](
+ to: ConnectHttp,
+ settings: ConnectionPoolSettings,
+ log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] =
adaptTupleFlow(delegate.cachedHostConnectionPoolHttps[T](to.host, to.port, to.effectiveHttpsConnectionContext(defaultClientHttpsContext).asScala, settings.asScala, log)(materializer)
.mapMaterializedValue(_.toJava))
@@ -460,9 +480,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* In order to allow for easy response-to-request association the flow takes in a custom, opaque context
* object of type `T` from the application which is emitted together with the corresponding response.
*/
- def superPool[T](settings: ConnectionPoolSettings,
- connectionContext: HttpsConnectionContext,
- log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] =
+ def superPool[T](
+ settings: ConnectionPoolSettings,
+ connectionContext: HttpsConnectionContext,
+ log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] =
adaptTupleFlow(delegate.superPool[T](connectionContext.asScala, settings.asScala, log)(materializer))
/**
@@ -480,8 +501,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* In order to allow for easy response-to-request association the flow takes in a custom, opaque context
* object of type `T` from the application which is emitted together with the corresponding response.
*/
- def superPool[T](settings: ConnectionPoolSettings,
- log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] =
+ def superPool[T](
+ settings: ConnectionPoolSettings,
+ log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] =
adaptTupleFlow(delegate.superPool[T](defaultClientHttpsContext.asScala, settings.asScala, log)(materializer))
/**
@@ -517,10 +539,11 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* Note that the request must have either an absolute URI or a valid `Host` header, otherwise
* the future will be completed with an error.
*/
- def singleRequest(request: HttpRequest,
- connectionContext: HttpsConnectionContext,
- settings: ConnectionPoolSettings,
- log: LoggingAdapter, materializer: Materializer): CompletionStage[HttpResponse] =
+ def singleRequest(
+ request: HttpRequest,
+ connectionContext: HttpsConnectionContext,
+ settings: ConnectionPoolSettings,
+ log: LoggingAdapter, materializer: Materializer): CompletionStage[HttpResponse] =
delegate.singleRequest(request.asScala, connectionContext.asScala, settings.asScala, log)(materializer).toJava
/**
@@ -537,8 +560,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* The layer is not reusable and must only be materialized once.
*/
- def webSocketClientLayer(request: WebSocketRequest,
- settings: ClientConnectionSettings): BidiFlow[Message, SslTlsOutbound, SslTlsInbound, Message, CompletionStage[WebSocketUpgradeResponse]] =
+ def webSocketClientLayer(
+ request: WebSocketRequest,
+ settings: ClientConnectionSettings): BidiFlow[Message, SslTlsOutbound, SslTlsInbound, Message, CompletionStage[WebSocketUpgradeResponse]] =
adaptWsBidiFlow(delegate.webSocketClientLayer(request.asScala, settings.asScala))
/**
@@ -547,9 +571,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* The layer is not reusable and must only be materialized once.
*/
- def webSocketClientLayer(request: WebSocketRequest,
- settings: ClientConnectionSettings,
- log: LoggingAdapter): BidiFlow[Message, SslTlsOutbound, SslTlsInbound, Message, CompletionStage[WebSocketUpgradeResponse]] =
+ def webSocketClientLayer(
+ request: WebSocketRequest,
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter): BidiFlow[Message, SslTlsOutbound, SslTlsInbound, Message, CompletionStage[WebSocketUpgradeResponse]] =
adaptWsBidiFlow(delegate.webSocketClientLayer(request.asScala, settings.asScala, log))
/**
@@ -567,11 +592,12 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* The layer is not reusable and must only be materialized once.
*/
- def webSocketClientFlow(request: WebSocketRequest,
- connectionContext: ConnectionContext,
- localAddress: Optional[InetSocketAddress],
- settings: ClientConnectionSettings,
- log: LoggingAdapter): Flow[Message, Message, CompletionStage[WebSocketUpgradeResponse]] =
+ def webSocketClientFlow(
+ request: WebSocketRequest,
+ connectionContext: ConnectionContext,
+ localAddress: Optional[InetSocketAddress],
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter): Flow[Message, Message, CompletionStage[WebSocketUpgradeResponse]] =
adaptWsFlow {
delegate.webSocketClientFlow(request.asScala, connectionContext.asScala, localAddress.asScala, settings.asScala, log)
}
@@ -582,9 +608,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* The [[defaultClientHttpsContext]] is used to configure TLS for the connection.
*/
- def singleWebSocketRequest[T](request: WebSocketRequest,
- clientFlow: Flow[Message, Message, T],
- materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] =
+ def singleWebSocketRequest[T](
+ request: WebSocketRequest,
+ clientFlow: Flow[Message, Message, T],
+ materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] =
adaptWsResultTuple {
delegate.singleWebSocketRequest(
request.asScala,
@@ -597,10 +624,11 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
*
* The [[defaultClientHttpsContext]] is used to configure TLS for the connection.
*/
- def singleWebSocketRequest[T](request: WebSocketRequest,
- clientFlow: Flow[Message, Message, T],
- connectionContext: ConnectionContext,
- materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] =
+ def singleWebSocketRequest[T](
+ request: WebSocketRequest,
+ clientFlow: Flow[Message, Message, T],
+ connectionContext: ConnectionContext,
+ materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] =
adaptWsResultTuple {
delegate.singleWebSocketRequest(
request.asScala,
@@ -612,13 +640,14 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension {
* Runs a single WebSocket conversation given a Uri and a flow that represents the client side of the
* WebSocket conversation.
*/
- def singleWebSocketRequest[T](request: WebSocketRequest,
- clientFlow: Flow[Message, Message, T],
- connectionContext: ConnectionContext,
- localAddress: Optional[InetSocketAddress],
- settings: ClientConnectionSettings,
- log: LoggingAdapter,
- materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] =
+ def singleWebSocketRequest[T](
+ request: WebSocketRequest,
+ clientFlow: Flow[Message, Message, T],
+ connectionContext: ConnectionContext,
+ localAddress: Optional[InetSocketAddress],
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter,
+ materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] =
adaptWsResultTuple {
delegate.singleWebSocketRequest(
request.asScala,
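All of the javadsl overloads above are thin adapters onto the scaladsl delegate, taking an explicit `Materializer` where the Scala side uses an implicit. A sketch of the reformatted `bindAndHandleSync` overload, driven from Scala for brevity (the handler type is `akka.japi.function.Function`):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.http.javadsl.{ ConnectHttp, Http }
import akka.http.javadsl.model.{ HttpRequest, HttpResponse }
import akka.japi.function.Function

implicit val system = ActorSystem("javadsl-demo")
val materializer = ActorMaterializer()

val handler = new Function[HttpRequest, HttpResponse] {
  override def apply(request: HttpRequest): HttpResponse = HttpResponse.create()
}
// binds with default ServerSettings; yields a CompletionStage[ServerBinding]
val binding = Http.get(system).bindAndHandleSync(
  handler, ConnectHttp.toHost("localhost", 8080), materializer)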
diff --git a/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala b/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala
index dd43a48845..8eec14babf 100644
--- a/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala
+++ b/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala
@@ -61,18 +61,18 @@ abstract class ParserSettings private[akka] () extends BodyPartParser.Settings {
@varargs
def withCustomMethods(methods: HttpMethod*): ParserSettings = {
- val map = methods.map(m ⇒ m.name -> m.asScala).toMap
+ val map = methods.map(m ⇒ m.name → m.asScala).toMap
self.copy(customMethods = map.get)
}
@varargs
def withCustomStatusCodes(codes: StatusCode*): ParserSettings = {
- val map = codes.map(c ⇒ c.intValue -> c.asScala).toMap
+ val map = codes.map(c ⇒ c.intValue → c.asScala).toMap
self.copy(customStatusCodes = map.get)
}
@varargs
def withCustomMediaTypes(mediaTypes: MediaType*): ParserSettings = {
- val map = mediaTypes.map(c ⇒ (c.mainType, c.subType) -> c.asScala).toMap
- self.copy(customMediaTypes = (main, sub) ⇒ map.get(main -> sub))
+ val map = mediaTypes.map(c ⇒ (c.mainType, c.subType) → c.asScala).toMap
+ self.copy(customMediaTypes = (main, sub) ⇒ map.get(main → sub))
}
}
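`withCustomMediaTypes` builds a lookup from `(mainType, subType)` to the registered type, which is exactly the `FindCustom` shape the parser consults. A sketch with a hypothetical vendor type, using the scaladsl twin of the hook above:

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpCharsets, MediaType }
import akka.http.scaladsl.settings.ParserSettings

implicit val system = ActorSystem("media-type-demo")
// hypothetical vendor media type; unregistered types would parse as unknown/custom
val vendorJson = MediaType.customWithFixedCharset(
  "application", "vnd.acme+json", HttpCharsets.`UTF-8`)
val parserSettings = ParserSettings(system).withCustomMediaTypes(vendorJson)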
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala
index 1ae03b5083..b29c0a3ac8 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala
@@ -22,42 +22,44 @@ trait ConnectionContext extends akka.http.javadsl.ConnectionContext {
object ConnectionContext {
//#https-context-creation
// ConnectionContext
- def https(sslContext: SSLContext,
- sslConfig: Option[AkkaSSLConfig] = None,
- enabledCipherSuites: Option[immutable.Seq[String]] = None,
- enabledProtocols: Option[immutable.Seq[String]] = None,
- clientAuth: Option[TLSClientAuth] = None,
- sslParameters: Option[SSLParameters] = None) =
+ def https(
+ sslContext: SSLContext,
+ sslConfig: Option[AkkaSSLConfig] = None,
+ enabledCipherSuites: Option[immutable.Seq[String]] = None,
+ enabledProtocols: Option[immutable.Seq[String]] = None,
+ clientAuth: Option[TLSClientAuth] = None,
+ sslParameters: Option[SSLParameters] = None) =
new HttpsConnectionContext(sslContext, sslConfig, enabledCipherSuites, enabledProtocols, clientAuth, sslParameters)
//#https-context-creation
// for binary-compatibility, since 2.4.7
- def https(sslContext: SSLContext,
- enabledCipherSuites: Option[immutable.Seq[String]],
- enabledProtocols: Option[immutable.Seq[String]],
- clientAuth: Option[TLSClientAuth],
- sslParameters: Option[SSLParameters]) =
+ def https(
+ sslContext: SSLContext,
+ enabledCipherSuites: Option[immutable.Seq[String]],
+ enabledProtocols: Option[immutable.Seq[String]],
+ clientAuth: Option[TLSClientAuth],
+ sslParameters: Option[SSLParameters]) =
new HttpsConnectionContext(sslContext, None, enabledCipherSuites, enabledProtocols, clientAuth, sslParameters)
def noEncryption() = HttpConnectionContext
}
final class HttpsConnectionContext(
- val sslContext: SSLContext,
- val sslConfig: Option[AkkaSSLConfig] = None,
+ val sslContext: SSLContext,
+ val sslConfig: Option[AkkaSSLConfig] = None,
val enabledCipherSuites: Option[immutable.Seq[String]] = None,
- val enabledProtocols: Option[immutable.Seq[String]] = None,
- val clientAuth: Option[TLSClientAuth] = None,
- val sslParameters: Option[SSLParameters] = None)
+ val enabledProtocols: Option[immutable.Seq[String]] = None,
+ val clientAuth: Option[TLSClientAuth] = None,
+ val sslParameters: Option[SSLParameters] = None)
extends akka.http.javadsl.HttpsConnectionContext with ConnectionContext {
// for binary-compatibility, since 2.4.7
def this(
- sslContext: SSLContext,
+ sslContext: SSLContext,
enabledCipherSuites: Option[immutable.Seq[String]],
- enabledProtocols: Option[immutable.Seq[String]],
- clientAuth: Option[TLSClientAuth],
- sslParameters: Option[SSLParameters]) =
+ enabledProtocols: Option[immutable.Seq[String]],
+ clientAuth: Option[TLSClientAuth],
+ sslParameters: Option[SSLParameters]) =
this(sslContext, None, enabledCipherSuites, enabledProtocols, clientAuth, sslParameters)
def firstSession = NegotiateNewSession(enabledCipherSuites, enabledProtocols, clientAuth, sslParameters)
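With every parameter except the `SSLContext` defaulted, the common call collapses to a one-liner; a minimal sketch, where the JVM default context stands in for properly configured key/trust material:

import javax.net.ssl.SSLContext
import akka.http.scaladsl.{ ConnectionContext, HttpsConnectionContext }

// real deployments should build the SSLContext from explicit key/trust stores
val https: HttpsConnectionContext = ConnectionContext.https(SSLContext.getDefault)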
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala
index 76ac3f949f..4be23b9f8a 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala
@@ -78,8 +78,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*/
def bind(interface: String, port: Int = DefaultPortForProtocol,
connectionContext: ConnectionContext = defaultServerHttpContext,
- settings: ServerSettings = ServerSettings(system),
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Source[IncomingConnection, Future[ServerBinding]] = {
+ settings: ServerSettings = ServerSettings(system),
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Source[IncomingConnection, Future[ServerBinding]] = {
val effectivePort = if (port >= 0) port else connectionContext.defaultPort
val tlsStage = sslTlsStage(connectionContext, Server)
val connections: Source[Tcp.IncomingConnection, Future[Tcp.ServerBinding]] =
@@ -104,11 +104,12 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* To configure additional settings for a server started using this method,
* use the `akka.http.server` config section or pass in a [[akka.http.scaladsl.settings.ServerSettings]] explicitly.
*/
- def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, Any],
- interface: String, port: Int = DefaultPortForProtocol,
- connectionContext: ConnectionContext = defaultServerHttpContext,
- settings: ServerSettings = ServerSettings(system),
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = {
+ def bindAndHandle(
+ handler: Flow[HttpRequest, HttpResponse, Any],
+ interface: String, port: Int = DefaultPortForProtocol,
+ connectionContext: ConnectionContext = defaultServerHttpContext,
+ settings: ServerSettings = ServerSettings(system),
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = {
def handleOneConnection(incomingConnection: IncomingConnection): Future[Done] =
try
incomingConnection.flow
@@ -145,11 +146,12 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* To configure additional settings for a server started using this method,
* use the `akka.http.server` config section or pass in a [[akka.http.scaladsl.settings.ServerSettings]] explicitly.
*/
- def bindAndHandleSync(handler: HttpRequest ⇒ HttpResponse,
- interface: String, port: Int = DefaultPortForProtocol,
- connectionContext: ConnectionContext = defaultServerHttpContext,
- settings: ServerSettings = ServerSettings(system),
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] =
+ def bindAndHandleSync(
+ handler: HttpRequest ⇒ HttpResponse,
+ interface: String, port: Int = DefaultPortForProtocol,
+ connectionContext: ConnectionContext = defaultServerHttpContext,
+ settings: ServerSettings = ServerSettings(system),
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] =
bindAndHandle(Flow[HttpRequest].map(handler), interface, port, connectionContext, settings, log)
/**
@@ -162,12 +164,13 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* To configure additional settings for a server started using this method,
* use the `akka.http.server` config section or pass in a [[akka.http.scaladsl.settings.ServerSettings]] explicitly.
*/
- def bindAndHandleAsync(handler: HttpRequest ⇒ Future[HttpResponse],
- interface: String, port: Int = DefaultPortForProtocol,
- connectionContext: ConnectionContext = defaultServerHttpContext,
- settings: ServerSettings = ServerSettings(system),
- parallelism: Int = 1,
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] =
+ def bindAndHandleAsync(
+ handler: HttpRequest ⇒ Future[HttpResponse],
+ interface: String, port: Int = DefaultPortForProtocol,
+ connectionContext: ConnectionContext = defaultServerHttpContext,
+ settings: ServerSettings = ServerSettings(system),
+ parallelism: Int = 1,
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] =
bindAndHandle(Flow[HttpRequest].mapAsync(parallelism)(handler), interface, port, connectionContext, settings, log)
type ServerLayer = Http.ServerLayer
@@ -185,9 +188,10 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* can only be materialized once. The `remoteAddress`, if provided, will be added as a header to each [[akka.http.scaladsl.model.HttpRequest]]
* this layer produces if the `akka.http.server.remote-address-header` configuration option is enabled.
*/
- def serverLayer(settings: ServerSettings,
- remoteAddress: Option[InetSocketAddress] = None,
- log: LoggingAdapter = system.log)(implicit mat: Materializer): ServerLayer =
+ def serverLayer(
+ settings: ServerSettings,
+ remoteAddress: Option[InetSocketAddress] = None,
+ log: LoggingAdapter = system.log)(implicit mat: Materializer): ServerLayer =
HttpServerBluePrint(settings, remoteAddress, log)
// ** CLIENT ** //
@@ -204,8 +208,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*/
def outgoingConnection(host: String, port: Int = 80,
localAddress: Option[InetSocketAddress] = None,
- settings: ClientConnectionSettings = ClientConnectionSettings(system),
- log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] =
+ settings: ClientConnectionSettings = ClientConnectionSettings(system),
+ log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] =
_outgoingConnection(host, port, localAddress, settings, ConnectionContext.noEncryption(), log)
/**
@@ -218,18 +222,19 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* use the `akka.http.client` config section or pass in a [[akka.http.scaladsl.settings.ClientConnectionSettings]] explicitly.
*/
def outgoingConnectionHttps(host: String, port: Int = 443,
- connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
- localAddress: Option[InetSocketAddress] = None,
- settings: ClientConnectionSettings = ClientConnectionSettings(system),
- log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] =
+ connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
+ localAddress: Option[InetSocketAddress] = None,
+ settings: ClientConnectionSettings = ClientConnectionSettings(system),
+ log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] =
_outgoingConnection(host, port, localAddress, settings, connectionContext, log)
- private def _outgoingConnection(host: String,
- port: Int,
- localAddress: Option[InetSocketAddress],
- settings: ClientConnectionSettings,
- connectionContext: ConnectionContext,
- log: LoggingAdapter): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = {
+ private def _outgoingConnection(
+ host: String,
+ port: Int,
+ localAddress: Option[InetSocketAddress],
+ settings: ClientConnectionSettings,
+ connectionContext: ConnectionContext,
+ log: LoggingAdapter): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = {
val hostHeader = if (port == connectionContext.defaultPort) Host(host) else Host(host, port)
val layer = clientLayer(hostHeader, settings, log)
layer.joinMat(_outgoingTlsConnectionLayer(host, port, localAddress, settings, connectionContext, log))(Keep.right)
@@ -238,7 +243,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
private def _outgoingTlsConnectionLayer(host: String, port: Int, localAddress: Option[InetSocketAddress],
settings: ClientConnectionSettings, connectionContext: ConnectionContext,
log: LoggingAdapter): Flow[SslTlsOutbound, SslTlsInbound, Future[OutgoingConnection]] = {
- val tlsStage = sslTlsStage(connectionContext, Client, Some(host -> port))
+ val tlsStage = sslTlsStage(connectionContext, Client, Some(host → port))
val transportFlow = Tcp().outgoingConnection(new InetSocketAddress(host, port), localAddress,
settings.socketOptions, halfClose = true, settings.connectingTimeout, settings.idleTimeout)
@@ -260,9 +265,10 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
/**
* Constructs a [[akka.http.scaladsl.Http.ClientLayer]] stage using the given [[akka.http.scaladsl.settings.ClientConnectionSettings]].
*/
- def clientLayer(hostHeader: Host,
- settings: ClientConnectionSettings,
- log: LoggingAdapter = system.log): ClientLayer =
+ def clientLayer(
+ hostHeader: Host,
+ settings: ClientConnectionSettings,
+ log: LoggingAdapter = system.log): ClientLayer =
OutgoingConnectionBlueprint(hostHeader, settings, log)
// ** CONNECTION POOL ** //
@@ -286,7 +292,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*/
def newHostConnectionPool[T](host: String, port: Int = 80,
settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
val cps = ConnectionPoolSetup(settings, ConnectionContext.noEncryption(), log)
newHostConnectionPool(HostConnectionPoolSetup(host, port, cps))
}
@@ -302,8 +308,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*/
def newHostConnectionPoolHttps[T](host: String, port: Int = 443,
connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
- settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
+ settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
val cps = ConnectionPoolSetup(settings, connectionContext, log)
newHostConnectionPool(HostConnectionPoolSetup(host, port, cps))
}
@@ -325,7 +331,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* object of type `T` from the application which is emitted together with the corresponding response.
*/
private[akka] def newHostConnectionPool[T](setup: HostConnectionPoolSetup)(
- implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
+ implicit
+ fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
val gateway = new PoolGateway(poolMasterActorRef, setup, PoolGateway.newUniqueGatewayIdentifier)
gatewayClientFlow(setup, gateway.startPool())
}
@@ -352,7 +359,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*/
def cachedHostConnectionPool[T](host: String, port: Int = 80,
settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
val cps = ConnectionPoolSetup(settings, ConnectionContext.noEncryption(), log)
val setup = HostConnectionPoolSetup(host, port, cps)
cachedHostConnectionPool(setup)
@@ -369,8 +376,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*/
def cachedHostConnectionPoolHttps[T](host: String, port: Int = 443,
connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
- settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
+ settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
val cps = ConnectionPoolSetup(settings, connectionContext, log)
val setup = HostConnectionPoolSetup(host, port, cps)
cachedHostConnectionPool(setup)
@@ -394,7 +401,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* object of type `T` from the application which is emitted together with the corresponding response.
*/
private def cachedHostConnectionPool[T](setup: HostConnectionPoolSetup)(
- implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
+ implicit
+ fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = {
gatewayClientFlow(setup, sharedGateway(setup).startPool())
}
@@ -415,10 +423,11 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* To configure additional settings for the pool (and requests made using it),
* use the `akka.http.host-connection-pool` config section or pass in a [[ConnectionPoolSettings]] explicitly.
*/
- def superPool[T](connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
- settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] =
- clientFlow[T](settings) { request ⇒ request -> sharedGateway(request, settings, connectionContext, log) }
+ def superPool[T](
+ connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
+ settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] =
+ clientFlow[T](settings) { request ⇒ request → sharedGateway(request, settings, connectionContext, log) }
/**
* Fires a single [[akka.http.scaladsl.model.HttpRequest]] across the (cached) host connection pool for the request's
@@ -429,10 +438,11 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*
* Note that the request must have an absolute URI, otherwise the future will be completed with an error.
*/
- def singleRequest(request: HttpRequest,
- connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
- settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
- log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[HttpResponse] =
+ def singleRequest(
+ request: HttpRequest,
+ connectionContext: HttpsConnectionContext = defaultClientHttpsContext,
+ settings: ConnectionPoolSettings = defaultConnectionPoolSettings,
+ log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[HttpResponse] =
try {
val gateway = sharedGateway(request, settings, connectionContext, log)
gateway(request)
@@ -446,9 +456,10 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*
* The layer is not reusable and must only be materialized once.
*/
- def webSocketClientLayer(request: WebSocketRequest,
- settings: ClientConnectionSettings = ClientConnectionSettings(system),
- log: LoggingAdapter = system.log): Http.WebSocketClientLayer =
+ def webSocketClientLayer(
+ request: WebSocketRequest,
+ settings: ClientConnectionSettings = ClientConnectionSettings(system),
+ log: LoggingAdapter = system.log): Http.WebSocketClientLayer =
WebSocketClientBlueprint(request, settings, log)
/**
@@ -456,11 +467,12 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
*
* The layer is not reusable and must only be materialized once.
*/
- def webSocketClientFlow(request: WebSocketRequest,
- connectionContext: ConnectionContext = defaultClientHttpsContext,
- localAddress: Option[InetSocketAddress] = None,
- settings: ClientConnectionSettings = ClientConnectionSettings(system),
- log: LoggingAdapter = system.log): Flow[Message, Message, Future[WebSocketUpgradeResponse]] = {
+ def webSocketClientFlow(
+ request: WebSocketRequest,
+ connectionContext: ConnectionContext = defaultClientHttpsContext,
+ localAddress: Option[InetSocketAddress] = None,
+ settings: ClientConnectionSettings = ClientConnectionSettings(system),
+ log: LoggingAdapter = system.log): Flow[Message, Message, Future[WebSocketUpgradeResponse]] = {
import request.uri
require(uri.isAbsolute, s"WebSocket request URI must be absolute but was '$uri'")
@@ -483,12 +495,13 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
* Runs a single WebSocket conversation given a Uri and a flow that represents the client side of the
* WebSocket conversation.
*/
- def singleWebSocketRequest[T](request: WebSocketRequest,
- clientFlow: Flow[Message, Message, T],
- connectionContext: ConnectionContext = defaultClientHttpsContext,
- localAddress: Option[InetSocketAddress] = None,
- settings: ClientConnectionSettings = ClientConnectionSettings(system),
- log: LoggingAdapter = system.log)(implicit mat: Materializer): (Future[WebSocketUpgradeResponse], T) =
+ def singleWebSocketRequest[T](
+ request: WebSocketRequest,
+ clientFlow: Flow[Message, Message, T],
+ connectionContext: ConnectionContext = defaultClientHttpsContext,
+ localAddress: Option[InetSocketAddress] = None,
+ settings: ClientConnectionSettings = ClientConnectionSettings(system),
+ log: LoggingAdapter = system.log)(implicit mat: Materializer): (Future[WebSocketUpgradeResponse], T) =
webSocketClientFlow(request, connectionContext, localAddress, settings, log)
.joinMat(clientFlow)(Keep.both).run()
@@ -565,21 +578,24 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte
private def sharedGateway(hcps: HostConnectionPoolSetup): PoolGateway =
new PoolGateway(poolMasterActorRef, hcps, PoolGateway.SharedGateway)(systemMaterializer)
- private def gatewayClientFlow[T](hcps: HostConnectionPoolSetup,
- gateway: PoolGateway)(
- implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] =
- clientFlow[T](hcps.setup.settings)(_ -> gateway)
+ private def gatewayClientFlow[T](
+ hcps: HostConnectionPoolSetup,
+ gateway: PoolGateway)(
+ implicit
+ fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] =
+ clientFlow[T](hcps.setup.settings)(_ → gateway)
.mapMaterializedValue(_ ⇒ HostConnectionPool(hcps)(gateway))
private def clientFlow[T](settings: ConnectionPoolSettings)(f: HttpRequest ⇒ (HttpRequest, PoolGateway))(
- implicit system: ActorSystem, fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] = {
+ implicit
+ system: ActorSystem, fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] = {
// a connection pool can never have more than pipeliningLimit * maxConnections requests in flight at any point
val parallelism = settings.pipeliningLimit * settings.maxConnections
Flow[(HttpRequest, T)].mapAsyncUnordered(parallelism) {
case (request, userContext) ⇒
val (effectiveRequest, gateway) = f(request)
val result = Promise[(Try[HttpResponse], T)]() // TODO: simplify to `transformWith` when on Scala 2.12
- gateway(effectiveRequest).onComplete(responseTry ⇒ result.success(responseTry -> userContext))(fm.executionContext)
+ gateway(effectiveRequest).onComplete(responseTry ⇒ result.success(responseTry → userContext))(fm.executionContext)
result.future
}
}
@@ -672,9 +688,9 @@ object Http extends ExtensionId[HttpExt] with ExtensionIdProvider {
* Represents one accepted incoming HTTP connection.
*/
final case class IncomingConnection(
- localAddress: InetSocketAddress,
+ localAddress: InetSocketAddress,
remoteAddress: InetSocketAddress,
- flow: Flow[HttpResponse, HttpRequest, NotUsed]) {
+ flow: Flow[HttpResponse, HttpRequest, NotUsed]) {
/**
* Handles the connection with the given flow, which is materialized exactly once
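Taken together, the hunks above only re-indent the server- and client-side entry points; their call shapes are unchanged. A sketch of the two most common ones:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import scala.concurrent.Future

implicit val system = ActorSystem("http-demo")
implicit val materializer = ActorMaterializer()

// server: bindAndHandle with the defaulted connectionContext/settings/log
val binding: Future[Http.ServerBinding] =
  Http().bindAndHandle(Flow[HttpRequest].map(_ ⇒ HttpResponse()), "localhost", 8080)

// client: singleRequest goes through the shared (cached) pool gateway
val response: Future[HttpResponse] =
  Http().singleRequest(HttpRequest(uri = "http://localhost:8080/"))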
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala
index 90b4d4a376..f70316b5e3 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala
@@ -11,15 +11,16 @@ import akka.http.impl.util._
* Does not support TimeZones; all DateTime values are always GMT-based.
* Note that this implementation discards milliseconds (i.e. rounds down to full seconds).
*/
-final case class DateTime private (year: Int, // the year
- month: Int, // the month of the year. January is 1.
- day: Int, // the day of the month. The first day is 1.
- hour: Int, // the hour of the day. The first hour is 0.
- minute: Int, // the minute of the hour. The first minute is 0.
- second: Int, // the second of the minute. The first second is 0.
- weekday: Int, // the day of the week. Sunday is 0.
- clicks: Long, // milliseconds since January 1, 1970, 00:00:00 GMT
- isLeapYear: Boolean) extends akka.http.javadsl.model.DateTime with Ordered[DateTime] with Renderable {
+final case class DateTime private (
+ year: Int, // the year
+ month: Int, // the month of the year. January is 1.
+ day: Int, // the day of the month. The first day is 1.
+ hour: Int, // the hour of the day. The first hour is 0.
+ minute: Int, // the minute of the hour. The first minute is 0.
+ second: Int, // the second of the minute. The first second is 0.
+ weekday: Int, // the day of the week. Sunday is 0.
+ clicks: Long, // milliseconds since January 1, 1970, 00:00:00 GMT
+ isLeapYear: Boolean) extends akka.http.javadsl.model.DateTime with Ordered[DateTime] with Renderable {
/**
* The day of the week as a 3 letter abbreviation:
* `Sun`, `Mon`, `Tue`, `Wed`, `Thu`, `Fri` or `Sat`
@@ -42,7 +43,6 @@ final case class DateTime private (year: Int, // the year
*/
def -(millis: Long): DateTime = DateTime(clicks - millis)
-
/**
* Creates a new `DateTime` that represents the point in time the given number of ms earlier.
*/
@@ -172,7 +172,8 @@ object DateTime {
* Note that this implementation discards milliseconds (i.e. rounds down to full seconds).
*/
def apply(clicks: Long): DateTime = {
- require(DateTime.MinValue.clicks <= clicks && clicks <= DateTime.MaxValue.clicks,
+ require(
+ DateTime.MinValue.clicks <= clicks && clicks <= DateTime.MaxValue.clicks,
"DateTime value must be >= " + DateTime.MinValue + " and <= " + DateTime.MaxValue)
// based on a fast RFC1123 implementation (C) 2000 by Tim Kientzle
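
A hedged usage sketch for the DateTime API in this file (values illustrative): construction from clicks is bounds-checked by the require above, and plus/minus shift the value by milliseconds.

    import akka.http.scaladsl.model.DateTime

    val epoch = DateTime(0L) // 1970-01-01T00:00:00 GMT
    val later = epoch + 5000 // five seconds later
    assert(epoch.year == 1970 && epoch.month == 1 && epoch.day == 1)
    // clicks outside [DateTime.MinValue.clicks, DateTime.MaxValue.clicks] trip the require above
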
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala
index 8d9cb297f9..c07ccd8db5 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala
@@ -11,7 +11,7 @@ import akka.http.impl.model.JavaInitialization
import language.implicitConversions
import java.io.File
import java.nio.file.{ Path, Files }
-import java.lang.{ Iterable ⇒ JIterable}
+import java.lang.{ Iterable ⇒ JIterable }
import scala.util.control.NonFatal
import scala.concurrent.Future
import scala.concurrent.duration._
@@ -24,7 +24,7 @@ import akka.{ NotUsed, stream }
import akka.http.scaladsl.model.ContentType.{ NonBinary, Binary }
import akka.http.scaladsl.util.FastFuture
import akka.http.javadsl.{ model ⇒ jm }
-import akka.http.impl.util.{JavaMapping, StreamUtils}
+import akka.http.impl.util.{ JavaMapping, StreamUtils }
import akka.http.impl.util.JavaMapping.Implicits._
import scala.compat.java8.OptionConverters._
@@ -341,9 +341,10 @@ object HttpEntity {
/**
* The model for the entity of a "regular" unchunked HTTP message with a known non-zero length.
*/
- final case class Default(contentType: ContentType,
- contentLength: Long,
- data: Source[ByteString, Any])
+ final case class Default(
+ contentType: ContentType,
+ contentLength: Long,
+ data: Source[ByteString, Any])
extends jm.HttpEntity.Default with UniversalEntity {
require(contentLength > 0, "contentLength must be positive (use `HttpEntity.empty(contentType)` for empty entities)")
def isKnownEmpty = false
@@ -592,18 +593,18 @@ object HttpEntity {
*/
private[http] def captureTermination[T <: HttpEntity](entity: T): (T, Future[Unit]) =
entity match {
- case x: HttpEntity.Strict ⇒ x.asInstanceOf[T] -> FastFuture.successful(())
+ case x: HttpEntity.Strict ⇒ x.asInstanceOf[T] → FastFuture.successful(())
case x: HttpEntity.Default ⇒
val (newData, whenCompleted) = StreamUtils.captureTermination(x.data)
- x.copy(data = newData).asInstanceOf[T] -> whenCompleted
+ x.copy(data = newData).asInstanceOf[T] → whenCompleted
case x: HttpEntity.Chunked ⇒
val (newChunks, whenCompleted) = StreamUtils.captureTermination(x.chunks)
- x.copy(chunks = newChunks).asInstanceOf[T] -> whenCompleted
+ x.copy(chunks = newChunks).asInstanceOf[T] → whenCompleted
case x: HttpEntity.CloseDelimited ⇒
val (newData, whenCompleted) = StreamUtils.captureTermination(x.data)
- x.copy(data = newData).asInstanceOf[T] -> whenCompleted
+ x.copy(data = newData).asInstanceOf[T] → whenCompleted
case x: HttpEntity.IndefiniteLength ⇒
val (newData, whenCompleted) = StreamUtils.captureTermination(x.data)
- x.copy(data = newData).asInstanceOf[T] -> whenCompleted
+ x.copy(data = newData).asInstanceOf[T] → whenCompleted
}
}
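
captureTermination above pairs an entity with a Future that completes when the entity's byte stream terminates. The stock watchTermination combinator has similar semantics; a minimal sketch (setup names illustrative):

    import akka.actor.ActorSystem
    import akka.stream.ActorMaterializer
    import akka.stream.scaladsl.{ Keep, Sink, Source }
    import akka.util.ByteString

    implicit val system = ActorSystem("demo")
    implicit val materializer = ActorMaterializer()

    val data = Source.single(ByteString("payload"))
    val (_, whenCompleted) = data.watchTermination()(Keep.both).toMat(Sink.ignore)(Keep.left).run()
    whenCompleted.foreach(_ ⇒ println("stream terminated"))(system.dispatcher)
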
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala
index 827a7a6551..65347d068d 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala
@@ -16,7 +16,7 @@ import scala.compat.java8.OptionConverters._
import scala.reflect.{ classTag, ClassTag }
import akka.parboiled2.CharUtils
import akka.stream.Materializer
-import akka.util.{HashCode, ByteString}
+import akka.util.{ HashCode, ByteString }
import akka.http.impl.util._
import akka.http.javadsl.{ model ⇒ jm }
import akka.http.scaladsl.util.FastFuture._
@@ -85,9 +85,9 @@ sealed trait HttpMessage extends jm.HttpMessage {
def header[T <: jm.HttpHeader: ClassTag]: Option[T] = {
val erasure = classTag[T].runtimeClass
headers.find(erasure.isInstance).asInstanceOf[Option[T]] match {
- case header: Some[T] => header
- case _ if erasure == classOf[`Content-Type`] => Some(entity.contentType).asInstanceOf[Option[T]]
- case _ => None
+ case header: Some[T] ⇒ header
+ case _ if erasure == classOf[`Content-Type`] ⇒ Some(entity.contentType).asInstanceOf[Option[T]]
+ case _ ⇒ None
}
}
@@ -145,16 +145,17 @@ object HttpMessage {
* The immutable HTTP request model.
*/
final class HttpRequest(
- val method: HttpMethod,
- val uri: Uri,
- val headers: immutable.Seq[HttpHeader],
- val entity: RequestEntity,
- val protocol: HttpProtocol)
+ val method: HttpMethod,
+ val uri: Uri,
+ val headers: immutable.Seq[HttpHeader],
+ val entity: RequestEntity,
+ val protocol: HttpProtocol)
extends jm.HttpRequest with HttpMessage {
HttpRequest.verifyUri(uri)
require(entity.isKnownEmpty || method.isEntityAccepted, s"Requests with method '${method.value}' must have an empty entity")
- require(protocol != HttpProtocols.`HTTP/1.0` || !entity.isInstanceOf[HttpEntity.Chunked],
+ require(
+ protocol != HttpProtocols.`HTTP/1.0` || !entity.isInstanceOf[HttpEntity.Chunked],
"HTTP/1.0 requests must not have a chunked entity")
type Self = HttpRequest
@@ -212,13 +213,12 @@ final class HttpRequest(
/* Manual Case Class things, to ease bin-compat */
- def copy(method: HttpMethod = method,
- uri: Uri = uri,
- headers: immutable.Seq[HttpHeader] = headers,
- entity: RequestEntity = entity,
- protocol: HttpProtocol = protocol) = new HttpRequest(method, uri, headers, entity, protocol)
-
-
+ def copy(
+ method: HttpMethod = method,
+ uri: Uri = uri,
+ headers: immutable.Seq[HttpHeader] = headers,
+ entity: RequestEntity = entity,
+ protocol: HttpProtocol = protocol) = new HttpRequest(method, uri, headers, entity, protocol)
override def hashCode(): Int = {
var result = HashCode.SEED
@@ -231,13 +231,13 @@ final class HttpRequest(
}
override def equals(obj: scala.Any): Boolean = obj match {
- case HttpRequest(_method, _uri, _headers, _entity, _protocol) =>
+ case HttpRequest(_method, _uri, _headers, _entity, _protocol) ⇒
method == _method &&
uri == _uri &&
headers == _headers &&
entity == _entity &&
protocol == _protocol
- case _ => false
+ case _ ⇒ false
}
override def toString = s"""HttpRequest(${_1},${_2},${_3},${_4},${_5})"""
@@ -273,7 +273,8 @@ object HttpRequest {
} else // http://tools.ietf.org/html/rfc7230#section-5.4
if (hostHeader.isEmpty || uri.authority.isEmpty && hostHeader.get.isEmpty ||
hostHeader.get.host.equalsIgnoreCase(uri.authority.host) && hostHeader.get.port == uri.authority.port) uri
- else throw IllegalUriException(s"'Host' header value of request to `$uri` doesn't match request target authority",
+ else throw IllegalUriException(
+ s"'Host' header value of request to `$uri` doesn't match request target authority",
s"Host header: $hostHeader\nrequest target authority: ${uri.authority}")
}
@@ -295,11 +296,12 @@ object HttpRequest {
/* Manual Case Class things, to ease bin-compat */
- def apply(method: HttpMethod = HttpMethods.GET,
- uri: Uri = Uri./,
- headers: immutable.Seq[HttpHeader] = Nil,
- entity: RequestEntity = HttpEntity.Empty,
- protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) = new HttpRequest(method, uri, headers, entity, protocol)
+ def apply(
+ method: HttpMethod = HttpMethods.GET,
+ uri: Uri = Uri./,
+ headers: immutable.Seq[HttpHeader] = Nil,
+ entity: RequestEntity = HttpEntity.Empty,
+ protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) = new HttpRequest(method, uri, headers, entity, protocol)
def unapply(any: HttpRequest) = new OptHttpRequest(any)
}
@@ -308,14 +310,15 @@ object HttpRequest {
* The immutable HTTP response model.
*/
final class HttpResponse(
- val status: StatusCode,
- val headers: immutable.Seq[HttpHeader],
- val entity: ResponseEntity,
- val protocol: HttpProtocol)
+ val status: StatusCode,
+ val headers: immutable.Seq[HttpHeader],
+ val entity: ResponseEntity,
+ val protocol: HttpProtocol)
extends jm.HttpResponse with HttpMessage {
require(entity.isKnownEmpty || status.allowsEntity, "Responses with this status code must have an empty entity")
- require(protocol == HttpProtocols.`HTTP/1.1` || !entity.isInstanceOf[HttpEntity.Chunked],
+ require(
+ protocol == HttpProtocols.`HTTP/1.1` || !entity.isInstanceOf[HttpEntity.Chunked],
"HTTP/1.0 responses must not have a chunked entity")
type Self = HttpResponse
@@ -341,19 +344,19 @@ final class HttpResponse(
/* Manual Case Class things, to ease bin-compat */
- def copy(status: StatusCode = status,
- headers: immutable.Seq[HttpHeader] = headers,
- entity: ResponseEntity = entity,
- protocol: HttpProtocol = protocol) = new HttpResponse(status, headers, entity, protocol)
-
+ def copy(
+ status: StatusCode = status,
+ headers: immutable.Seq[HttpHeader] = headers,
+ entity: ResponseEntity = entity,
+ protocol: HttpProtocol = protocol) = new HttpResponse(status, headers, entity, protocol)
override def equals(obj: scala.Any): Boolean = obj match {
- case HttpResponse(_status, _headers, _entity, _protocol) =>
+ case HttpResponse(_status, _headers, _entity, _protocol) ⇒
status == _status &&
headers == _headers &&
entity == _entity &&
protocol == _protocol
- case _ => false
+ case _ ⇒ false
}
override def hashCode: Int = {
@@ -378,10 +381,11 @@ final class HttpResponse(
object HttpResponse {
/* Manual Case Class things, to ease bin-compat */
- def apply(status: StatusCode = StatusCodes.OK,
- headers: immutable.Seq[HttpHeader] = Nil,
- entity: ResponseEntity = HttpEntity.Empty,
- protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) = new HttpResponse(status, headers, entity, protocol)
+ def apply(
+ status: StatusCode = StatusCodes.OK,
+ headers: immutable.Seq[HttpHeader] = Nil,
+ entity: ResponseEntity = HttpEntity.Empty,
+ protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) = new HttpResponse(status, headers, entity, protocol)
def unapply(any: HttpResponse): OptHttpResponse = new OptHttpResponse(any)
}
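
The hand-written apply/copy above keep the familiar case-class feel while allowing fields to be added later without breaking binary compatibility. A hedged usage sketch (values illustrative):

    import akka.http.scaladsl.model._
    import akka.http.scaladsl.model.headers.Host

    val request  = HttpRequest(HttpMethods.POST, uri = "/upload", entity = HttpEntity("data"))
    val rerouted = request.copy(headers = List(Host("example.com")))
    val response = HttpResponse(StatusCodes.Created, entity = HttpEntity(ContentTypes.`text/plain(UTF-8)`, "done"))
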
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala
index 2184e1d14a..7ee304c81f 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala
@@ -29,10 +29,11 @@ object RequestEntityAcceptance {
* @param isIdempotent true if requests can be safely (& automatically) repeated
* @param requestEntityAcceptance whether a request entity is expected, tolerated or disallowed for this method
*/
-final case class HttpMethod private[http] (override val value: String,
- isSafe: Boolean,
- isIdempotent: Boolean,
- requestEntityAcceptance: RequestEntityAcceptance) extends jm.HttpMethod with SingletonValueRenderable {
+final case class HttpMethod private[http] (
+ override val value: String,
+ isSafe: Boolean,
+ isIdempotent: Boolean,
+ requestEntityAcceptance: RequestEntityAcceptance) extends jm.HttpMethod with SingletonValueRenderable {
override def isEntityAccepted: Boolean = requestEntityAcceptance.isEntityAccepted
override def toString: String = s"HttpMethod($value)"
}
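
For non-standard verbs, HttpMethod.custom mirrors the case-class fields above; a sketch (PROPFIND and its flags are illustrative):

    import akka.http.scaladsl.model.{ HttpMethod, RequestEntityAcceptance }

    val PROPFIND = HttpMethod.custom("PROPFIND", safe = false, idempotent = true,
      requestEntityAcceptance = RequestEntityAcceptance.Expected)
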
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala
index f4119ab98b..ed8c385099 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala
@@ -50,8 +50,8 @@ sealed abstract class MediaRange extends jm.MediaRange with Renderable with With
object MediaRange {
private[http] def splitOffQValue(params: Map[String, String], defaultQ: Float = 1.0f): (Map[String, String], Float) =
params.get("q") match {
- case Some(x) ⇒ (params - "q") -> (try x.toFloat catch { case _: NumberFormatException ⇒ 1.0f })
- case None ⇒ params -> defaultQ
+ case Some(x) ⇒ (params - "q") → (try x.toFloat catch { case _: NumberFormatException ⇒ 1.0f })
+ case None ⇒ params → defaultQ
}
private final case class Custom(mainType: String, params: Map[String, String], qValue: Float)
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala
index 7ab33fa5e9..dc78006599 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala
@@ -126,8 +126,8 @@ object MediaType {
}
def customWithFixedCharset(mainType: String, subType: String, charset: HttpCharset, fileExtensions: List[String] = Nil,
- params: Map[String, String] = Map.empty,
- allowArbitrarySubtypes: Boolean = false): WithFixedCharset = {
+ params: Map[String, String] = Map.empty,
+ allowArbitrarySubtypes: Boolean = false): WithFixedCharset = {
require(mainType != "multipart", "Cannot create a MediaType.Multipart here, use `customMultipart` instead!")
require(allowArbitrarySubtypes || subType != "*", "Cannot create a MediaRange here, use `MediaRange.custom` instead!")
val _params = params
@@ -143,8 +143,8 @@ object MediaType {
}
def customWithOpenCharset(mainType: String, subType: String, fileExtensions: List[String] = Nil,
- params: Map[String, String] = Map.empty,
- allowArbitrarySubtypes: Boolean = false): WithOpenCharset = {
+ params: Map[String, String] = Map.empty,
+ allowArbitrarySubtypes: Boolean = false): WithOpenCharset = {
require(mainType != "multipart", "Cannot create a MediaType.Multipart here, use `customMultipart` instead!")
require(allowArbitrarySubtypes || subType != "*", "Cannot create a MediaRange here, use `MediaRange.custom` instead!")
val _params = params
@@ -258,7 +258,7 @@ object MediaType {
}
object MediaTypes extends ObjectRegistry[(String, String), MediaType] {
- type FindCustom = (String, String) => Option[MediaType]
+ type FindCustom = (String, String) ⇒ Option[MediaType]
private[this] var extensionMap = Map.empty[String, MediaType]
@@ -276,7 +276,7 @@ object MediaTypes extends ObjectRegistry[(String, String), MediaType] {
private def register[T <: MediaType](mediaType: T): T = {
registerFileExtensions(mediaType)
- register(mediaType.mainType.toRootLowerCase -> mediaType.subType.toRootLowerCase, mediaType)
+ register(mediaType.mainType.toRootLowerCase → mediaType.subType.toRootLowerCase, mediaType)
}
import MediaType._
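
A hedged sketch of the custom media-type constructors shown above (the concrete types are illustrative):

    import akka.http.scaladsl.model.{ HttpCharsets, MediaType }

    val jsonV2   = MediaType.customWithFixedCharset("application", "vnd.example.v2+json", HttpCharsets.`UTF-8`)
    val markdown = MediaType.customWithOpenCharset("text", "markdown", fileExtensions = List("md"))
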
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala
index fdbd6f7b98..7275ff8dab 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala
@@ -56,8 +56,9 @@ sealed trait Multipart extends jm.Multipart {
/**
* Creates a [[akka.http.scaladsl.model.MessageEntity]] from this multipart object.
*/
- def toEntity(charset: HttpCharset = HttpCharsets.`UTF-8`,
- boundary: String = BodyPartRenderer.randomBoundary())(implicit log: LoggingAdapter = NoLogging): MessageEntity = {
+ def toEntity(
+ charset: HttpCharset = HttpCharsets.`UTF-8`,
+ boundary: String = BodyPartRenderer.randomBoundary())(implicit log: LoggingAdapter = NoLogging): MessageEntity = {
val chunks =
parts
.transform(() ⇒ BodyPartRenderer.streamed(boundary, charset.nioCharset, partHeadersSizeHint = 128, log))
@@ -224,7 +225,7 @@ object Multipart {
}
def unapply(value: Multipart.General): Option[(MediaType.Multipart, Source[Multipart.General.BodyPart, Any])] =
- Some(value.mediaType -> value.parts)
+ Some(value.mediaType → value.parts)
/**
* Strict [[General]] multipart content.
@@ -284,7 +285,7 @@ object Multipart {
override def toString = s"General.BodyPart($entity, $headers)"
}
- def unapply(value: BodyPart): Option[(BodyPartEntity, immutable.Seq[HttpHeader])] = Some(value.entity -> value.headers)
+ def unapply(value: BodyPart): Option[(BodyPartEntity, immutable.Seq[HttpHeader])] = Some(value.entity → value.headers)
/**
* Strict [[General.BodyPart]].
@@ -429,8 +430,8 @@ object Multipart {
}
object BodyPart {
def apply(_name: String, _entity: BodyPartEntity,
- _additionalDispositionParams: Map[String, String] = Map.empty,
- _additionalHeaders: immutable.Seq[HttpHeader] = Nil): Multipart.FormData.BodyPart =
+ _additionalDispositionParams: Map[String, String] = Map.empty,
+ _additionalHeaders: immutable.Seq[HttpHeader] = Nil): Multipart.FormData.BodyPart =
new Multipart.FormData.BodyPart {
def name = _name
def additionalDispositionParams = _additionalDispositionParams
@@ -450,7 +451,7 @@ object Multipart {
* Creates a BodyPart backed by a file that will be streamed using a FileSource.
*/
def fromPath(name: String, contentType: ContentType, file: Path, chunkSize: Int = -1): BodyPart =
- BodyPart(name, HttpEntity.fromPath(contentType, file, chunkSize), Map("filename" -> file.getFileName.toString))
+ BodyPart(name, HttpEntity.fromPath(contentType, file, chunkSize), Map("filename" → file.getFileName.toString))
def unapply(value: BodyPart): Option[(String, BodyPartEntity, Map[String, String], immutable.Seq[HttpHeader])] =
Some((value.name, value.entity, value.additionalDispositionParams, value.additionalHeaders))
@@ -459,8 +460,8 @@ object Multipart {
* Strict [[FormData.BodyPart]].
*/
case class Strict(name: String, entity: HttpEntity.Strict,
- additionalDispositionParams: Map[String, String] = Map.empty,
- additionalHeaders: immutable.Seq[HttpHeader] = Nil)
+ additionalDispositionParams: Map[String, String] = Map.empty,
+ additionalHeaders: immutable.Seq[HttpHeader] = Nil)
extends Multipart.FormData.BodyPart with Multipart.BodyPart.Strict with jm.Multipart.FormData.BodyPart.Strict {
override def toStrict(timeout: FiniteDuration)(implicit fm: Materializer): Future[Multipart.FormData.BodyPart.Strict] =
FastFuture.successful(this)
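
Putting the FormData pieces above together; a sketch (field names and the file path are illustrative):

    import java.nio.file.Paths
    import akka.http.scaladsl.model._
    import akka.stream.scaladsl.Source

    val textPart = Multipart.FormData.BodyPart.Strict("comment", HttpEntity("hello"))
    val filePart = Multipart.FormData.BodyPart.fromPath(
      "attachment", ContentTypes.`application/octet-stream`, Paths.get("/tmp/report.pdf"))
    val entity: RequestEntity = Multipart.FormData(Source(List(textPart, filePart))).toEntity()
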
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala
index 9c99da0fb5..6a91a8151c 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala
@@ -600,9 +600,9 @@ object Uri {
}
private val defaultPorts: Map[String, Int] =
- Map("ftp" -> 21, "ssh" -> 22, "telnet" -> 23, "smtp" -> 25, "domain" -> 53, "tftp" -> 69, "http" -> 80, "ws" -> 80,
- "pop3" -> 110, "nntp" -> 119, "imap" -> 143, "snmp" -> 161, "ldap" -> 389, "https" -> 443, "wss" -> 443, "imaps" -> 993,
- "nfs" -> 2049).withDefaultValue(-1)
+ Map("ftp" → 21, "ssh" → 22, "telnet" → 23, "smtp" → 25, "domain" → 53, "tftp" → 69, "http" → 80, "ws" → 80,
+ "pop3" → 110, "nntp" → 119, "imap" → 143, "snmp" → 161, "ldap" → 389, "https" → 443, "wss" → 443, "imaps" → 993,
+ "nfs" → 2049).withDefaultValue(-1)
sealed trait ParsingMode extends akka.http.javadsl.model.Uri.ParsingMode
object ParsingMode {
@@ -732,9 +732,9 @@ object Uri {
case Slash(Segment("..", tail)) ⇒ process(
input = if (tail.isEmpty) Path./ else tail,
output =
- if (output.startsWithSegment)
- if (output.tail.startsWithSlash) output.tail.tail else tail
- else output)
+ if (output.startsWithSegment)
+ if (output.tail.startsWithSlash) output.tail.tail else tail
+ else output)
case Segment("." | "..", tail) ⇒ process(tail, output)
case Slash(tail) ⇒ process(tail, Slash(output))
case Segment(string, tail) ⇒ process(tail, string :: output)
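
The defaultPorts table above backs Uri.effectivePort; a small sketch:

    import akka.http.scaladsl.model.Uri

    assert(Uri("https://example.com/").effectivePort == 443)     // scheme default from the table
    assert(Uri("wss://example.com:9001/").effectivePort == 9001) // an explicit port wins
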
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala
index ec5d07efce..5c27f3ead6 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala
@@ -17,7 +17,7 @@ import scala.compat.java8.OptionConverters._
// see http://tools.ietf.org/html/rfc6265
// sealed abstract to prevent generation of default apply method in companion
sealed abstract case class HttpCookiePair private (
- name: String,
+ name: String,
value: String) extends jm.headers.HttpCookiePair with ToStringRenderable {
def render[R <: Rendering](r: R): r.type = r ~~ name ~~ '=' ~~ value
@@ -50,15 +50,15 @@ object HttpCookiePair {
// see http://tools.ietf.org/html/rfc6265
final case class HttpCookie(
- name: String,
- value: String,
- expires: Option[DateTime] = None,
- maxAge: Option[Long] = None,
- domain: Option[String] = None,
- path: Option[String] = None,
- secure: Boolean = false,
- httpOnly: Boolean = false,
- extension: Option[String] = None) extends jm.headers.HttpCookie with ToStringRenderable {
+ name: String,
+ value: String,
+ expires: Option[DateTime] = None,
+ maxAge: Option[Long] = None,
+ domain: Option[String] = None,
+ path: Option[String] = None,
+ secure: Boolean = false,
+ httpOnly: Boolean = false,
+ extension: Option[String] = None) extends jm.headers.HttpCookie with ToStringRenderable {
/** Returns the name/value pair for this cookie, to be used in [[Cookie]] headers. */
def pair: HttpCookiePair = HttpCookiePair(name, value)
@@ -111,14 +111,15 @@ final case class HttpCookie(
}
object HttpCookie {
- def fromPair(pair: HttpCookiePair,
- expires: Option[DateTime] = None,
- maxAge: Option[Long] = None,
- domain: Option[String] = None,
- path: Option[String] = None,
- secure: Boolean = false,
- httpOnly: Boolean = false,
- extension: Option[String] = None): HttpCookie =
+ def fromPair(
+ pair: HttpCookiePair,
+ expires: Option[DateTime] = None,
+ maxAge: Option[Long] = None,
+ domain: Option[String] = None,
+ path: Option[String] = None,
+ secure: Boolean = false,
+ httpOnly: Boolean = false,
+ extension: Option[String] = None): HttpCookie =
HttpCookie(pair.name, pair.value, expires, maxAge, domain, path, secure, httpOnly, extension)
import akka.http.impl.model.parser.CharacterClasses._
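
A hedged usage sketch for the cookie model above (values illustrative):

    import akka.http.scaladsl.model.DateTime
    import akka.http.scaladsl.model.headers.{ Cookie, HttpCookie, `Set-Cookie` }

    val session = HttpCookie("session", "abc123", expires = Some(DateTime.now + 3600 * 1000), httpOnly = true)
    val set     = `Set-Cookie`(session)  // server side
    val echoed  = Cookie(session.pair)   // client side: only the name/value pair travels back
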
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala
index a5510aefce..15e3844a1e 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala
@@ -641,7 +641,7 @@ final case class RawHeader(name: String, value: String) extends jm.headers.RawHe
}
object RawHeader {
def unapply[H <: HttpHeader](customHeader: H): Option[(String, String)] =
- Some(customHeader.name -> customHeader.value)
+ Some(customHeader.name → customHeader.value)
}
object `Raw-Request-URI` extends ModeledCompanion[`Raw-Request-URI`]
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala
index 336082e95f..1ebb912906 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala
@@ -34,8 +34,9 @@ trait UpgradeToWebSocket extends jm.ws.UpgradeToWebSocket {
*
* Optionally, a subprotocol out of the ones requested by the client can be chosen.
*/
- def handleMessages(handlerFlow: Graph[FlowShape[Message, Message], Any],
- subprotocol: Option[String] = None): HttpResponse
+ def handleMessages(
+ handlerFlow: Graph[FlowShape[Message, Message], Any],
+ subprotocol: Option[String] = None): HttpResponse
/**
* The high-level interface to create a WebSocket server based on "messages".
@@ -47,9 +48,10 @@ trait UpgradeToWebSocket extends jm.ws.UpgradeToWebSocket {
*
* Optionally, a subprotocol out of the ones requested by the client can be chosen.
*/
- def handleMessagesWithSinkSource(inSink: Graph[SinkShape[Message], Any],
- outSource: Graph[SourceShape[Message], Any],
- subprotocol: Option[String] = None): HttpResponse =
+ def handleMessagesWithSinkSource(
+ inSink: Graph[SinkShape[Message], Any],
+ outSource: Graph[SourceShape[Message], Any],
+ subprotocol: Option[String] = None): HttpResponse =
handleMessages(scaladsl.Flow.fromSinkAndSource(inSink, outSource), subprotocol)
import scala.collection.JavaConverters._
@@ -80,9 +82,10 @@ trait UpgradeToWebSocket extends jm.ws.UpgradeToWebSocket {
/**
* Java API
*/
- def handleMessagesWith(inSink: Graph[SinkShape[jm.ws.Message], _ <: Any],
- outSource: Graph[SourceShape[jm.ws.Message], _ <: Any],
- subprotocol: String): HttpResponse =
+ def handleMessagesWith(
+ inSink: Graph[SinkShape[jm.ws.Message], _ <: Any],
+ outSource: Graph[SourceShape[jm.ws.Message], _ <: Any],
+ subprotocol: String): HttpResponse =
handleMessages(createScalaFlow(inSink, outSource), subprotocol = Some(subprotocol))
private[this] def createScalaFlow(inSink: Graph[SinkShape[jm.ws.Message], _ <: Any], outSource: Graph[SourceShape[jm.ws.Message], _ <: Any]): Graph[FlowShape[Message, Message], NotUsed] =
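
A minimal server-side sketch of the handleMessages API above (the identity echo flow and the 400 fallback are illustrative):

    import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
    import akka.http.scaladsl.model.ws.{ Message, UpgradeToWebSocket }
    import akka.stream.scaladsl.Flow

    val echo = Flow[Message] // pass WebSocket messages back unchanged

    def handler(request: HttpRequest): HttpResponse =
      request.header[UpgradeToWebSocket] match {
        case Some(upgrade) ⇒ upgrade.handleMessages(echo)
        case None          ⇒ HttpResponse(400, entity = "Expected WebSocket request")
      }
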
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala
index 89e482b1cd..9a76e7f952 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala
@@ -17,9 +17,9 @@ import akka.http.scaladsl.model.{ HttpHeader, Uri }
* @param subprotocol A WebSocket subprotocol if required.
*/
final case class WebSocketRequest(
- uri: Uri,
+ uri: Uri,
extraHeaders: immutable.Seq[HttpHeader] = Nil,
- subprotocol: Option[String] = None)
+ subprotocol: Option[String] = None)
object WebSocketRequest {
implicit def fromTargetUri(uri: Uri): WebSocketRequest = WebSocketRequest(uri)
implicit def fromTargetUriString(uriString: String): WebSocketRequest = WebSocketRequest(uriString)
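
Thanks to the implicit conversions above, a plain URI string can stand in for a WebSocketRequest; a sketch (endpoints illustrative):

    import akka.http.scaladsl.model.ws.WebSocketRequest

    val simple: WebSocketRequest = "ws://example.com/chat"
    val withProto = WebSocketRequest("wss://example.com/chat", subprotocol = Some("v1.chat"))
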
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala
index b5ae452d54..fa4ae90d73 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala
@@ -89,15 +89,15 @@ abstract class ParserSettings private[akka] () extends akka.http.javadsl.setting
def withErrorLoggingVerbosity(newValue: ParserSettings.ErrorLoggingVerbosity): ParserSettings = self.copy(errorLoggingVerbosity = newValue)
def withHeaderValueCacheLimits(newValue: Map[String, Int]): ParserSettings = self.copy(headerValueCacheLimits = newValue)
def withCustomMethods(methods: HttpMethod*): ParserSettings = {
- val map = methods.map(m ⇒ m.name -> m).toMap
+ val map = methods.map(m ⇒ m.name → m).toMap
self.copy(customMethods = map.get)
}
def withCustomStatusCodes(codes: StatusCode*): ParserSettings = {
- val map = codes.map(c ⇒ c.intValue -> c).toMap
+ val map = codes.map(c ⇒ c.intValue → c).toMap
self.copy(customStatusCodes = map.get)
}
def withCustomMediaTypes(types: MediaType*): ParserSettings = {
- val map = types.map(c ⇒ (c.mainType, c.subType) -> c).toMap
+ val map = types.map(c ⇒ (c.mainType, c.subType) → c).toMap
self.copy(customMediaTypes = (main, sub) ⇒ map.get((main, sub)))
}
}
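
Registering such customizations with a server; a hedged sketch that redefines the illustrative PROPFIND method and a vendor media type inline:

    import akka.actor.ActorSystem
    import akka.http.scaladsl.model.{ HttpMethod, MediaType, RequestEntityAcceptance }
    import akka.http.scaladsl.settings.{ ParserSettings, ServerSettings }

    implicit val system = ActorSystem("demo")
    val PROPFIND   = HttpMethod.custom("PROPFIND", safe = false, idempotent = true,
      requestEntityAcceptance = RequestEntityAcceptance.Expected)
    val vendorJson = MediaType.customWithOpenCharset("application", "vnd.example+json")

    val parserSettings = ParserSettings(system)
      .withCustomMethods(PROPFIND)
      .withCustomMediaTypes(vendorJson)
    val serverSettings = ServerSettings(system).withParserSettings(parserSettings)
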
diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala
index ccb1e1578b..2c8abddb16 100644
--- a/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala
+++ b/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala
@@ -80,9 +80,9 @@ object FastFuture {
def isCompleted = true
def result(atMost: Duration)(implicit permit: CanAwait) = a
def ready(atMost: Duration)(implicit permit: CanAwait) = this
- def transform[S](f: scala.util.Try[A] => scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
+ def transform[S](f: scala.util.Try[A] ⇒ scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
FastFuture(f(Success(a)))
- def transformWith[S](f: scala.util.Try[A] => scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
+ def transformWith[S](f: scala.util.Try[A] ⇒ scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
new FastFuture(this).transformWith(f)
}
private case class ErrorFuture(error: Throwable) extends Future[Nothing] {
@@ -91,9 +91,9 @@ object FastFuture {
def isCompleted = true
def result(atMost: Duration)(implicit permit: CanAwait) = throw error
def ready(atMost: Duration)(implicit permit: CanAwait) = this
- def transform[S](f: scala.util.Try[Nothing] => scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
+ def transform[S](f: scala.util.Try[Nothing] ⇒ scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
FastFuture(f(Failure(error)))
- def transformWith[S](f: scala.util.Try[Nothing] => scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
+ def transformWith[S](f: scala.util.Try[Nothing] ⇒ scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] =
new FastFuture(this).transformWith(f)
}
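
FastFuture short-circuits transformations on already-completed futures instead of scheduling them on the executor; a hedged sketch:

    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.Future
    import akka.http.scaladsl.util.FastFuture
    import akka.http.scaladsl.util.FastFuture._

    val ready: Future[Int]   = FastFuture.successful(21)
    val doubled: Future[Int] = ready.fast.map(_ * 2) // applied synchronously, no executor hop
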
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala
index ff8addc3df..4fde9f5469 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala
@@ -60,7 +60,7 @@ class ConnectionPoolSpec extends AkkaSpec("""
"properly complete a simple request/response cycle" in new TestSetup {
val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]()
- requestIn.sendNext(HttpRequest(uri = "/") -> 42)
+ requestIn.sendNext(HttpRequest(uri = "/") → 42)
responseOutSub.request(1)
acceptIncomingConnection()
@@ -71,8 +71,8 @@ class ConnectionPoolSpec extends AkkaSpec("""
"open a second connection if the first one is loaded" in new TestSetup {
val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]()
- requestIn.sendNext(HttpRequest(uri = "/a") -> 42)
- requestIn.sendNext(HttpRequest(uri = "/b") -> 43)
+ requestIn.sendNext(HttpRequest(uri = "/a") → 42)
+ requestIn.sendNext(HttpRequest(uri = "/b") → 43)
responseOutSub.request(2)
acceptIncomingConnection()
@@ -100,7 +100,7 @@ class ConnectionPoolSpec extends AkkaSpec("""
case x ⇒ super.testServerHandler(connNr)(x)
}
- requestIn.sendNext(HttpRequest(uri = "/a") -> 42)
+ requestIn.sendNext(HttpRequest(uri = "/a") → 42)
responseOutSub.request(1)
acceptIncomingConnection()
val (Success(r1), 42) = responseOut.expectNext()
@@ -110,7 +110,7 @@ class ConnectionPoolSpec extends AkkaSpec("""
responseEntityPub.sendNext(ByteString("YEAH"))
responseEntityProbe.expectNext(ByteString("YEAH"))
- requestIn.sendNext(HttpRequest(uri = "/b") -> 43)
+ requestIn.sendNext(HttpRequest(uri = "/b") → 43)
responseOutSub.request(1)
acceptIncomingConnection()
val (Success(r2), 43) = responseOut.expectNext()
@@ -120,13 +120,13 @@ class ConnectionPoolSpec extends AkkaSpec("""
"not open a second connection if there is an idle one available" in new TestSetup {
val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]()
- requestIn.sendNext(HttpRequest(uri = "/a") -> 42)
+ requestIn.sendNext(HttpRequest(uri = "/a") → 42)
responseOutSub.request(1)
acceptIncomingConnection()
val (Success(response1), 42) = responseOut.expectNext()
connNr(response1) shouldEqual 1
- requestIn.sendNext(HttpRequest(uri = "/b") -> 43)
+ requestIn.sendNext(HttpRequest(uri = "/b") → 43)
responseOutSub.request(1)
val (Success(response2), 43) = responseOut.expectNext()
connNr(response2) shouldEqual 1
@@ -138,7 +138,7 @@ class ConnectionPoolSpec extends AkkaSpec("""
val N = 500
val requestIds = Source.fromIterator(() ⇒ Iterator.from(1)).take(N)
- val idSum = requestIds.map(id ⇒ HttpRequest(uri = s"/r$id") -> id).via(poolFlow).map {
+ val idSum = requestIds.map(id ⇒ HttpRequest(uri = s"/r$id") → id).via(poolFlow).map {
case (Success(response), id) ⇒
requestUri(response) should endWith(s"/r$id")
id
@@ -156,8 +156,8 @@ class ConnectionPoolSpec extends AkkaSpec("""
"properly surface connection-level errors" in new TestSetup(autoAccept = true) {
val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int](maxRetries = 0)
- requestIn.sendNext(HttpRequest(uri = "/a") -> 42)
- requestIn.sendNext(HttpRequest(uri = "/crash") -> 43)
+ requestIn.sendNext(HttpRequest(uri = "/a") → 42)
+ requestIn.sendNext(HttpRequest(uri = "/crash") → 43)
responseOutSub.request(2)
override def mapServerSideOutboundRawBytes(bytes: ByteString): ByteString =
@@ -172,8 +172,8 @@ class ConnectionPoolSpec extends AkkaSpec("""
"retry failed requests" in new TestSetup(autoAccept = true) {
val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]()
- requestIn.sendNext(HttpRequest(uri = "/a") -> 42)
- requestIn.sendNext(HttpRequest(uri = "/crash") -> 43)
+ requestIn.sendNext(HttpRequest(uri = "/a") → 42)
+ requestIn.sendNext(HttpRequest(uri = "/crash") → 43)
responseOutSub.request(2)
val remainingResponsesToKill = new AtomicInteger(1)
@@ -191,8 +191,8 @@ class ConnectionPoolSpec extends AkkaSpec("""
"respect the configured `maxRetries` value" in new TestSetup(autoAccept = true) {
val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int](maxRetries = 4)
- requestIn.sendNext(HttpRequest(uri = "/a") -> 42)
- requestIn.sendNext(HttpRequest(uri = "/crash") -> 43)
+ requestIn.sendNext(HttpRequest(uri = "/a") → 42)
+ requestIn.sendNext(HttpRequest(uri = "/crash") → 43)
responseOutSub.request(2)
val remainingResponsesToKill = new AtomicInteger(5)
@@ -222,7 +222,7 @@ class ConnectionPoolSpec extends AkkaSpec("""
Await.result(gateway.poolStatus(), 1500.millis).get shouldBe a[PoolInterfaceRunning]
awaitCond({ Await.result(gateway.poolStatus(), 1500.millis).isEmpty }, 2000.millis)
- requestIn.sendNext(HttpRequest(uri = "/") -> 42)
+ requestIn.sendNext(HttpRequest(uri = "/") → 42)
responseOutSub.request(1)
acceptIncomingConnection()
@@ -272,8 +272,8 @@ class ConnectionPoolSpec extends AkkaSpec("""
val (requestIn, responseOut, responseOutSub, hcp) = superPool[Int]()
- requestIn.sendNext(HttpRequest(uri = s"http://$serverHostName:$serverPort/a") -> 42)
- requestIn.sendNext(HttpRequest(uri = s"http://$serverHostName2:$serverPort2/b") -> 43)
+ requestIn.sendNext(HttpRequest(uri = s"http://$serverHostName:$serverPort/a") → 42)
+ requestIn.sendNext(HttpRequest(uri = s"http://$serverHostName2:$serverPort2/b") → 43)
responseOutSub.request(2)
Seq(responseOut.expectNext(), responseOut.expectNext()) foreach {
@@ -284,8 +284,9 @@ class ConnectionPoolSpec extends AkkaSpec("""
}
}
- class TestSetup(serverSettings: ServerSettings = ServerSettings(system),
- autoAccept: Boolean = false) {
+ class TestSetup(
+ serverSettings: ServerSettings = ServerSettings(system),
+ autoAccept: Boolean = false) {
val (serverEndpoint, serverHostName, serverPort) = TestUtils.temporaryServerHostnameAndPort()
def testServerHandler(connNr: Int): HttpRequest ⇒ HttpResponse = {
@@ -322,23 +323,25 @@ class ConnectionPoolSpec extends AkkaSpec("""
private def handleConnection(c: Http.IncomingConnection) =
c.handleWithSyncHandler(testServerHandler(incomingConnectionCounter.incrementAndGet()))
- def cachedHostConnectionPool[T](maxConnections: Int = 2,
- maxRetries: Int = 2,
- maxOpenRequests: Int = 8,
- pipeliningLimit: Int = 1,
- idleTimeout: Duration = 5.seconds,
- ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = {
+ def cachedHostConnectionPool[T](
+ maxConnections: Int = 2,
+ maxRetries: Int = 2,
+ maxOpenRequests: Int = 8,
+ pipeliningLimit: Int = 1,
+ idleTimeout: Duration = 5.seconds,
+ ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = {
val settings = new ConnectionPoolSettingsImpl(maxConnections, maxRetries, maxOpenRequests, pipeliningLimit,
idleTimeout, ClientConnectionSettings(system))
flowTestBench(Http().cachedHostConnectionPool[T](serverHostName, serverPort, settings))
}
- def superPool[T](maxConnections: Int = 2,
- maxRetries: Int = 2,
- maxOpenRequests: Int = 8,
- pipeliningLimit: Int = 1,
- idleTimeout: Duration = 5.seconds,
- ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = {
+ def superPool[T](
+ maxConnections: Int = 2,
+ maxRetries: Int = 2,
+ maxOpenRequests: Int = 8,
+ pipeliningLimit: Int = 1,
+ idleTimeout: Duration = 5.seconds,
+ ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = {
val settings = new ConnectionPoolSettingsImpl(maxConnections, maxRetries, maxOpenRequests, pipeliningLimit,
idleTimeout, ClientConnectionSettings(system))
flowTestBench(Http().superPool[T](settings = settings))
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/HighLevelOutgoingConnectionSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/HighLevelOutgoingConnectionSpec.scala
index 0bc7cca0b7..4e37f69027 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/HighLevelOutgoingConnectionSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/HighLevelOutgoingConnectionSpec.scala
@@ -26,7 +26,8 @@ class HighLevelOutgoingConnectionSpec extends AkkaSpec {
"be able to handle 100 pipelined requests across one connection" in Utils.assertAllStagesStopped {
val (_, serverHostName, serverPort) = TestUtils.temporaryServerHostnameAndPort()
- val binding = Http().bindAndHandleSync(r ⇒ HttpResponse(entity = r.uri.toString.reverse.takeWhile(Character.isDigit).reverse),
+ val binding = Http().bindAndHandleSync(
+ r ⇒ HttpResponse(entity = r.uri.toString.reverse.takeWhile(Character.isDigit).reverse),
serverHostName, serverPort)
val N = 100
@@ -45,7 +46,8 @@ class HighLevelOutgoingConnectionSpec extends AkkaSpec {
"be able to handle 100 pipelined requests across 4 connections (client-flow is reusable)" in Utils.assertAllStagesStopped {
val (_, serverHostName, serverPort) = TestUtils.temporaryServerHostnameAndPort()
- val binding = Http().bindAndHandleSync(r ⇒ HttpResponse(entity = r.uri.toString.reverse.takeWhile(Character.isDigit).reverse),
+ val binding = Http().bindAndHandleSync(
+ r ⇒ HttpResponse(entity = r.uri.toString.reverse.takeWhile(Character.isDigit).reverse),
serverHostName, serverPort)
val connFlow = Http().outgoingConnection(serverHostName, serverPort)
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala
index 8c1d5fa686..e2cea50f60 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala
@@ -228,7 +228,7 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka.
|""")
inside(expectResponse()) {
- case HttpResponse(StatusCodes.OK, _, HttpEntity.Chunked(_, data), _) =>
+ case HttpResponse(StatusCodes.OK, _, HttpEntity.Chunked(_, data), _) ⇒
val dataProbe = TestSubscriber.manualProbe[ChunkStreamPart]
// but only one consumed by server
data.take(1).to(Sink.fromSubscriber(dataProbe)).run()
@@ -242,7 +242,7 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka.
}
"proceed to next response once previous response's entity has been drained" in new TestSetup {
- def twice(action: => Unit): Unit = { action; action }
+ def twice(action: ⇒ Unit): Unit = { action; action }
twice {
requestsSub.sendNext(HttpRequest())
@@ -823,17 +823,16 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka.
val netOut = TestSubscriber.manualProbe[ByteString]()
val netIn = TestPublisher.manualProbe[ByteString]()
- RunnableGraph.fromGraph(GraphDSL.create(OutgoingConnectionBlueprint(Host("example.com"), settings, NoLogging)) { implicit b ⇒
- client ⇒
- import GraphDSL.Implicits._
- Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2
- client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> Sink.fromSubscriber(netOut)
- Source.fromPublisher(requests) ~> client.in1
- client.out2 ~> Sink.fromSubscriber(responses)
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(OutgoingConnectionBlueprint(Host("example.com"), settings, NoLogging)) { implicit b ⇒ client ⇒
+ import GraphDSL.Implicits._
+ Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2
+ client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> Sink.fromSubscriber(netOut)
+ Source.fromPublisher(requests) ~> client.in1
+ client.out2 ~> Sink.fromSubscriber(responses)
+ ClosedShape
}).run()
- netOut -> netIn
+ netOut → netIn
}
def wipeDate(string: String) =
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala
index f7ce292145..41bb35fe97 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala
@@ -98,7 +98,7 @@ class TlsEndpointVerificationSpec extends AkkaSpec("""
}
val serverSideTls = Http().sslTlsStage(ExampleHttpContexts.exampleServerContext, Server)
- val clientSideTls = Http().sslTlsStage(clientContext, Client, Some(hostname -> 8080))
+ val clientSideTls = Http().sslTlsStage(clientContext, Client, Some(hostname → 8080))
val server =
Http().serverLayer()
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala
index 87ae14ade1..4282538f0b 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala
@@ -33,11 +33,11 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
check {
"""nodes: 0/H, 0/e, 0/l, 0/l, 0/o, 1/Ω
|branchData:\u0020
- |values: 'Hello""" -> parser.formatRawTrie
+ |values: 'Hello""" → parser.formatRawTrie
}
check {
"""-H-e-l-l-o- 'Hello
- |""" -> parser.formatTrie
+ |""" → parser.formatTrie
}
}
@@ -47,12 +47,12 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
check {
"""nodes: 0/H, 1/e, 0/l, 0/l, 0/o, 1/Ω, 0/a, 0/l, 0/l, 0/o, 2/Ω
|branchData: 6/2/0
- |values: 'Hello, 'Hallo""" -> parser.formatRawTrie
+ |values: 'Hello, 'Hallo""" → parser.formatRawTrie
}
check {
""" ┌─a-l-l-o- 'Hallo
|-H-e-l-l-o- 'Hello
- |""" -> parser.formatTrie
+ |""" → parser.formatTrie
}
}
@@ -63,13 +63,13 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
check {
"""nodes: 2/H, 1/e, 0/l, 0/l, 0/o, 1/Ω, 0/a, 0/l, 0/l, 0/o, 2/Ω, 0/Y, 0/e, 0/a, 0/h, 3/Ω
|branchData: 6/2/0, 0/1/11
- |values: 'Hello, 'Hallo, 'Yeah""" -> parser.formatRawTrie
+ |values: 'Hello, 'Hallo, 'Yeah""" → parser.formatRawTrie
}
check {
""" ┌─a-l-l-o- 'Hallo
|-H-e-l-l-o- 'Hello
| └─Y-e-a-h- 'Yeah
- |""" -> parser.formatTrie
+ |""" → parser.formatTrie
}
}
@@ -81,14 +81,14 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
check {
"""nodes: 2/H, 1/e, 0/l, 0/l, 0/o, 1/Ω, 0/a, 0/l, 0/l, 0/o, 2/Ω, 0/Y, 0/e, 0/a, 0/h, 3/Ω, 0/o, 0/o, 4/Ω
|branchData: 6/2/16, 0/1/11
- |values: 'Hello, 'Hallo, 'Yeah, 'Hoo""" -> parser.formatRawTrie
+ |values: 'Hello, 'Hallo, 'Yeah, 'Hoo""" → parser.formatRawTrie
}
check {
""" ┌─a-l-l-o- 'Hallo
|-H-e-l-l-o- 'Hello
| | └─o-o- 'Hoo
| └─Y-e-a-h- 'Yeah
- |""" -> parser.formatTrie
+ |""" → parser.formatTrie
}
}
@@ -103,7 +103,7 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
|-H-e-l-l-o- 'Hello
| | └─o-o- 'Foo
| └─Y-e-a-h- 'Yeah
- |""" -> parser.formatTrie
+ |""" → parser.formatTrie
}
}
@@ -142,7 +142,7 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
check {
""" ┌─f-a-n-c-y---p-a-n-t-s-:-(Fancy-Pants)- -f-o-o-\r-\n- *Fancy-Pants: foo
|-h-e-l-l-o-:- -b-o-b- 'Hello
- |""" -> parser.formatTrie
+ |""" → parser.formatTrie
}
ixA shouldEqual ixB
headerA shouldEqual RawHeader("Fancy-Pants", "foo")
@@ -253,7 +253,7 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll
if (parser.isEmpty) HttpHeaderParser.insertRemainingCharsAsNewNodes(parser, ByteString(line), value)
else HttpHeaderParser.insert(parser, ByteString(line), value)
- def parseLine(line: String) = parser.parseHeaderLine(ByteString(line))() -> parser.resultHeader
+ def parseLine(line: String) = parser.parseHeaderLine(ByteString(line))() → parser.resultHeader
def parseAndCache(lineA: String)(lineB: String = lineA): HttpHeader = {
val (ixA, headerA) = parseLine(lineA)
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
index fad24c20e9..a20c8994b2 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala
@@ -152,7 +152,8 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
|Transfer-Encoding: foo, chunked, bar
|Host: x
|
- |""" should parseTo(HttpRequest(PUT, "/", List(`Transfer-Encoding`(TransferEncodings.Extension("foo"),
+ |""" should parseTo(HttpRequest(PUT, "/", List(`Transfer-Encoding`(
+ TransferEncodings.Extension("foo"),
TransferEncodings.chunked, TransferEncodings.Extension("bar")), Host("x"))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
@@ -195,8 +196,9 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
}
"message chunk with and without extension" in new Test {
- Seq(start +
- """3
+ Seq(
+ start +
+ """3
|abc
|10;some=stuff;bla
|0123456789ABCDEF
@@ -220,7 +222,8 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
}
"message end" in new Test {
- Seq(start,
+ Seq(
+ start,
"""0
|
|""") should generalMultiParseTo(
@@ -229,14 +232,16 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
}
"message end with extension and trailer" in new Test {
- Seq(start,
+ Seq(
+ start,
"""000;nice=true
|Foo: pip
| apo
|Bar: xyz
|
|""") should generalMultiParseTo(
- Right(baseRequest.withEntity(Chunked(`application/pdf`,
+ Right(baseRequest.withEntity(Chunked(
+ `application/pdf`,
source(LastChunk("nice=true", List(RawHeader("Foo", "pip apo"), RawHeader("Bar", "xyz"))))))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
@@ -265,7 +270,8 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
|
|0
|
- |""" should parseTo(HttpRequest(PATCH, "/data", List(`Transfer-Encoding`(TransferEncodings.Extension("fancy")),
+ |""" should parseTo(HttpRequest(PATCH, "/data", List(
+ `Transfer-Encoding`(TransferEncodings.Extension("fancy")),
Host("ping")), HttpEntity.Chunked(`application/pdf`, source(LastChunk))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
@@ -300,45 +306,55 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
HttpEntity.Chunked(`application/octet-stream`, source()))
"an illegal char after chunk size" in new Test {
- Seq(start,
+ Seq(
+ start,
"""15 ;
- |""") should generalMultiParseTo(Right(baseRequest),
+ |""") should generalMultiParseTo(
+ Right(baseRequest),
Left(EntityStreamError(ErrorInfo("Illegal character ' ' in chunk start"))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
"an illegal char in chunk size" in new Test {
- Seq(start, "bla") should generalMultiParseTo(Right(baseRequest),
+ Seq(start, "bla") should generalMultiParseTo(
+ Right(baseRequest),
Left(EntityStreamError(ErrorInfo("Illegal character 'l' in chunk start"))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
"too-long chunk extension" in new Test {
- Seq(start, "3;" + ("x" * 257)) should generalMultiParseTo(Right(baseRequest),
+ Seq(start, "3;" + ("x" * 257)) should generalMultiParseTo(
+ Right(baseRequest),
Left(EntityStreamError(ErrorInfo("HTTP chunk extension length exceeds configured limit of 256 characters"))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
"too-large chunk size" in new Test {
- Seq(start,
+ Seq(
+ start,
"""1a2b3c4d5e
- |""") should generalMultiParseTo(Right(baseRequest),
+ |""") should generalMultiParseTo(
+ Right(baseRequest),
Left(EntityStreamError(ErrorInfo("HTTP chunk size exceeds the configured limit of 1048576 bytes"))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
"an illegal chunk termination" in new Test {
- Seq(start,
+ Seq(
+ start,
"""3
- |abcde""") should generalMultiParseTo(Right(baseRequest),
+ |abcde""") should generalMultiParseTo(
+ Right(baseRequest),
Left(EntityStreamError(ErrorInfo("Illegal chunk termination"))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
"an illegal header in the trailer" in new Test {
- Seq(start,
+ Seq(
+ start,
"""0
- |F@oo: pip""") should generalMultiParseTo(Right(baseRequest),
+ |F@oo: pip""") should generalMultiParseTo(
+ Right(baseRequest),
Left(EntityStreamError(ErrorInfo("Illegal character '@' in header name"))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
@@ -352,7 +368,8 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
"a too long HTTP method" in new Test {
"ABCDEFGHIJKLMNOPQ " should
- parseToError(BadRequest,
+ parseToError(
+ BadRequest,
ErrorInfo(
"Unsupported HTTP method",
"HTTP method too long (started with 'ABCDEFGHIJKLMNOP'). Increase `akka.http.server.parsing.max-method-length` to support HTTP methods with more characters."))
@@ -363,18 +380,21 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
|Content-Length: 3
|Content-Length: 4
|
- |foo""" should parseToError(BadRequest,
+ |foo""" should parseToError(
+ BadRequest,
ErrorInfo("HTTP message must not contain more than one Content-Length header"))
}
"a too-long URI" in new Test {
- "GET /23456789012345678901 HTTP/1.1" should parseToError(RequestUriTooLong,
+ "GET /23456789012345678901 HTTP/1.1" should parseToError(
+ RequestUriTooLong,
ErrorInfo("URI length exceeds the configured limit of 20 characters"))
}
"HTTP version 1.2" in new Test {
"""GET / HTTP/1.2
- |""" should parseToError(HTTPVersionNotSupported,
+ |""" should parseToError(
+ HTTPVersionNotSupported,
ErrorInfo("The server does not support the HTTP protocol version used in the request."))
}
@@ -391,7 +411,8 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
"with a too-long header-value" in new Test {
"""|GET / HTTP/1.1
- |Fancy: 123456789012345678901234567890123""" should parseToError(BadRequest,
+ |Fancy: 123456789012345678901234567890123""" should parseToError(
+ BadRequest,
ErrorInfo("HTTP header value exceeds the configured limit of 32 characters"))
}
@@ -475,8 +496,9 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
def generalRawMultiParseTo(expected: Either[RequestOutput, HttpRequest]*): Matcher[Seq[String]] =
generalRawMultiParseTo(newParser, expected: _*)
- def generalRawMultiParseTo(parser: HttpRequestParser,
- expected: Either[RequestOutput, HttpRequest]*): Matcher[Seq[String]] =
+ def generalRawMultiParseTo(
+ parser: HttpRequestParser,
+ expected: Either[RequestOutput, HttpRequest]*): Matcher[Seq[String]] =
equal(expected.map(strictEqualify))
.matcher[Seq[Either[RequestOutput, StrictEqualHttpRequest]]] compose multiParse(parser)
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala
index f8598af3c7..5ad34b5d38 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/ResponseParserSpec.scala
@@ -59,7 +59,8 @@ class ResponseParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
}
"a response with a simple body" in new Test {
- collectBlocking(rawParse(GET,
+ collectBlocking(rawParse(
+ GET,
prep {
"""HTTP/1.1 200 Ok
|Content-Length: 4
@@ -93,7 +94,8 @@ class ResponseParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
|Transfer-Encoding: foo, chunked, bar
|Content-Length: 0
|
- |""" should parseTo(HttpResponse(ServerOnTheMove, List(`Transfer-Encoding`(TransferEncodings.Extension("foo"),
+ |""" should parseTo(HttpResponse(ServerOnTheMove, List(`Transfer-Encoding`(
+ TransferEncodings.Extension("foo"),
TransferEncodings.chunked, TransferEncodings.Extension("bar")))))
closeAfterResponseCompletion shouldEqual Seq(false)
}
@@ -158,8 +160,9 @@ class ResponseParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
}
"message chunk with and without extension" in new Test {
- Seq(start +
- """3
+ Seq(
+ start +
+ """3
|abc
|10;some=stuff;bla
|0123456789ABCDEF
@@ -182,7 +185,8 @@ class ResponseParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
}
"message end" in new Test {
- Seq(start,
+ Seq(
+ start,
"""0
|
|""") should generalMultiParseTo(
@@ -191,14 +195,16 @@ class ResponseParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
}
"message end with extension, trailer and remaining content" in new Test {
- Seq(start,
+ Seq(
+ start,
"""000;nice=true
|Foo: pip
| apo
|Bar: xyz
|
|HT""") should generalMultiParseTo(
- Right(baseResponse.withEntity(Chunked(`application/pdf`,
+ Right(baseResponse.withEntity(Chunked(
+ `application/pdf`,
source(LastChunk("nice=true", List(RawHeader("Foo", "pip apo"), RawHeader("Bar", "xyz"))))))),
Left(MessageStartError(400: StatusCode, ErrorInfo("Illegal HTTP message start"))))
closeAfterResponseCompletion shouldEqual Seq(false)
@@ -210,7 +216,8 @@ class ResponseParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
|Cont""", """ent-Type: application/pdf
|
|""") should generalMultiParseTo(
- Right(HttpResponse(headers = List(`Transfer-Encoding`(TransferEncodings.Extension("fancy"))),
+ Right(HttpResponse(
+ headers = List(`Transfer-Encoding`(TransferEncodings.Extension("fancy"))),
entity = HttpEntity.Chunked(`application/pdf`, source()))),
Left(EntityStreamError(ErrorInfo("Entity stream truncation"))))
closeAfterResponseCompletion shouldEqual Seq(false)
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala
index 036e704e1d..205481a6c8 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala
@@ -152,7 +152,8 @@ class RequestRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
"POST request with body" in new TestSetup() {
- HttpRequest(POST, "/abc/xyz", entity = Chunked(ContentTypes.`text/plain(UTF-8)`,
+ HttpRequest(POST, "/abc/xyz", entity = Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
source("XXXX", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"))) should renderTo {
"""POST /abc/xyz HTTP/1.1
|Host: test.com:8080
@@ -177,7 +178,8 @@ class RequestRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
ChunkStreamPart("ABCDEFGHIJKLMNOPQRSTUVWXYZ"),
LastChunk)
- HttpRequest(POST, "/abc/xyz", entity = Chunked(ContentTypes.`text/plain(UTF-8)`,
+ HttpRequest(POST, "/abc/xyz", entity = Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
Source(chunks))) should renderTo {
"""POST /abc/xyz HTTP/1.1
|Host: test.com:8080
@@ -203,7 +205,8 @@ class RequestRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
LastChunk,
LastChunk)
- HttpRequest(POST, "/abc/xyz", entity = Chunked(ContentTypes.`text/plain(UTF-8)`,
+ HttpRequest(POST, "/abc/xyz", entity = Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
Source(chunks))) should renderTo {
"""POST /abc/xyz HTTP/1.1
|Host: test.com:8080
@@ -317,8 +320,9 @@ class RequestRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
override def afterAll() = system.terminate()
- class TestSetup(val userAgent: Option[`User-Agent`] = Some(`User-Agent`("akka-http/1.0.0")),
- serverAddress: InetSocketAddress = new InetSocketAddress("test.com", 8080))
+ class TestSetup(
+ val userAgent: Option[`User-Agent`] = Some(`User-Agent`("akka-http/1.0.0")),
+ serverAddress: InetSocketAddress = new InetSocketAddress("test.com", 8080))
extends HttpRequestRendererFactory(userAgent, requestHeaderSizeHint = 64, NoLogging) {
def awaitAtMost: FiniteDuration = 3.seconds
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala
index 705ad921d4..951d92a4e8 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala
@@ -114,7 +114,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
requestMethod = HttpMethods.HEAD,
response = HttpResponse(
headers = List(Age(30), Connection("Keep-Alive")),
- entity = HttpEntity.CloseDelimited(ContentTypes.`text/plain(UTF-8)`,
+ entity = HttpEntity.CloseDelimited(
+ ContentTypes.`text/plain(UTF-8)`,
Source.single(ByteString("Foo"))))) should renderTo(
"""HTTP/1.1 200 OK
|Age: 30
@@ -130,7 +131,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
requestMethod = HttpMethods.HEAD,
response = HttpResponse(
headers = List(Age(30), Connection("Keep-Alive")),
- entity = HttpEntity.Chunked(ContentTypes.`text/plain(UTF-8)`,
+ entity = HttpEntity.Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
Source.single(HttpEntity.Chunk(ByteString("Foo")))))) should renderTo(
"""HTTP/1.1 200 OK
|Age: 30
@@ -187,7 +189,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
"status 200 and a custom Transfer-Encoding header" in new TestSetup() {
- HttpResponse(headers = List(`Transfer-Encoding`(TransferEncodings.Extension("fancy"))),
+ HttpResponse(
+ headers = List(`Transfer-Encoding`(TransferEncodings.Extension("fancy"))),
entity = "All good") should renderTo {
"""HTTP/1.1 200 OK
|Transfer-Encoding: fancy
@@ -232,7 +235,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
"a response with a CloseDelimited body" - {
"without data" in new TestSetup() {
ResponseRenderingContext(
- HttpResponse(200, entity = CloseDelimited(ContentTypes.`application/json`,
+ HttpResponse(200, entity = CloseDelimited(
+ ContentTypes.`application/json`,
source(ByteString.empty)))) should renderTo(
"""HTTP/1.1 200 OK
|Server: akka-http/1.0.0
@@ -244,7 +248,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
"consisting of two parts" in new TestSetup() {
ResponseRenderingContext(
- HttpResponse(200, entity = CloseDelimited(ContentTypes.`application/json`,
+ HttpResponse(200, entity = CloseDelimited(
+ ContentTypes.`application/json`,
source(ByteString("abc"), ByteString("defg"))))) should renderTo(
"""HTTP/1.1 200 OK
|Server: akka-http/1.0.0
@@ -283,7 +288,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
"with one chunk and no explicit LastChunk" in new TestSetup() {
- HttpResponse(entity = Chunked(ContentTypes.`text/plain(UTF-8)`,
+ HttpResponse(entity = Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
source("Yahoooo"))) should renderTo {
"""HTTP/1.1 200 OK
|Server: akka-http/1.0.0
@@ -300,8 +306,10 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
"with one chunk and an explicit LastChunk" in new TestSetup() {
- HttpResponse(entity = Chunked(ContentTypes.`text/plain(UTF-8)`,
- source(Chunk(ByteString("body123"), """key=value;another="tl;dr""""),
+ HttpResponse(entity = Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
+ source(
+ Chunk(ByteString("body123"), """key=value;another="tl;dr""""),
LastChunk("foo=bar", List(Age(30), RawHeader("Cache-Control", "public")))))) should renderTo {
"""HTTP/1.1 200 OK
|Server: akka-http/1.0.0
@@ -320,8 +328,10 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
"with one chunk and and extra LastChunks at the end (which should be ignored)" in new TestSetup() {
- HttpResponse(entity = Chunked(ContentTypes.`text/plain(UTF-8)`,
- source(Chunk(ByteString("body123"), """key=value;another="tl;dr""""),
+ HttpResponse(entity = Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
+ source(
+ Chunk(ByteString("body123"), """key=value;another="tl;dr""""),
LastChunk("foo=bar", List(Age(30), RawHeader("Cache-Control", "public"))), LastChunk))) should renderTo {
"""HTTP/1.1 200 OK
|Server: akka-http/1.0.0
@@ -340,7 +350,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
"with a custom Transfer-Encoding header" in new TestSetup() {
- HttpResponse(headers = List(`Transfer-Encoding`(TransferEncodings.Extension("fancy"))),
+ HttpResponse(
+ headers = List(`Transfer-Encoding`(TransferEncodings.Extension("fancy"))),
entity = Chunked(ContentTypes.`text/plain(UTF-8)`, source("Yahoooo"))) should renderTo {
"""HTTP/1.1 200 OK
|Transfer-Encoding: fancy, chunked
@@ -361,7 +372,8 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
"with two chunks" in new TestSetup() {
ResponseRenderingContext(
requestProtocol = HttpProtocols.`HTTP/1.0`,
- response = HttpResponse(entity = Chunked(ContentTypes.`application/json`,
+ response = HttpResponse(entity = Chunked(
+ ContentTypes.`application/json`,
source(Chunk("abc"), Chunk("defg"))))) should renderTo(
"""HTTP/1.1 200 OK
|Server: akka-http/1.0.0
@@ -375,8 +387,10 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
"with one chunk and an explicit LastChunk" in new TestSetup() {
ResponseRenderingContext(
requestProtocol = HttpProtocols.`HTTP/1.0`,
- response = HttpResponse(entity = Chunked(ContentTypes.`text/plain(UTF-8)`,
- source(Chunk(ByteString("body123"), """key=value;another="tl;dr""""),
+ response = HttpResponse(entity = Chunked(
+ ContentTypes.`text/plain(UTF-8)`,
+ source(
+ Chunk(ByteString("body123"), """key=value;another="tl;dr""""),
LastChunk("foo=bar", List(Age(30), RawHeader("Cache-Control", "public"))))))) should renderTo(
"""HTTP/1.1 200 OK
|Server: akka-http/1.0.0
@@ -559,9 +573,10 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
forAll(table)((reqProto, headReq, reqCH, resProto, resCH, resCD, renCH, close) ⇒
ResponseRenderingContext(
response = HttpResponse(200, headers = resCH.toList,
- entity = if (resCD) HttpEntity.CloseDelimited(ContentTypes.`text/plain(UTF-8)`,
- Source.single(ByteString("ENTITY")))
- else HttpEntity("ENTITY"), protocol = resProto),
+ entity = if (resCD) HttpEntity.CloseDelimited(
+ ContentTypes.`text/plain(UTF-8)`,
+ Source.single(ByteString("ENTITY")))
+ else HttpEntity("ENTITY"), protocol = resProto),
requestMethod = if (headReq) HttpMethods.HEAD else HttpMethods.GET,
requestProtocol = reqProto,
closeRequested = HttpMessage.connectionCloseExpected(reqProto, reqCH)) should renderTo(
@@ -588,7 +603,7 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
renderToImpl(expected, checkClose = Some(close))
def renderToImpl(expected: String, checkClose: Option[Boolean]): Matcher[ResponseRenderingContext] =
- equal(expected.stripMarginWithNewline("\r\n") -> checkClose).matcher[(String, Option[Boolean])] compose { ctx ⇒
+ equal(expected.stripMarginWithNewline("\r\n") → checkClose).matcher[(String, Option[Boolean])] compose { ctx ⇒
val (wasCompletedFuture, resultFuture) =
(Source.single(ctx) ++ Source.maybe[ResponseRenderingContext]) // never send upstream completion
.via(renderer.named("renderer"))
@@ -612,7 +627,7 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll
}
}
- Await.result(resultFuture, awaitAtMost).reduceLeft(_ ++ _).utf8String -> wasCompleted
+ Await.result(resultFuture, awaitAtMost).reduceLeft(_ ++ _).utf8String → wasCompleted
}
override def currentTimeMillis() = DateTime(2011, 8, 25, 9, 10, 29).clicks // provide a stable date for testing
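
Many hunks in this file change nothing but the arrow glyph. In Scala 2.x this is purely cosmetic: `scala.Predef.ArrowAssoc` defines `→` (U+2192) as an alias of `->`, so both spellings construct the same tuple. A minimal check:

    val a = "key" -> "value" // ASCII arrow
    val b = "key" → "value"  // Unicode alias from scala.Predef.ArrowAssoc
    assert(a == b)           // both are the pair ("key", "value")

The same holds for `⇒` (U+21D2), which the Scala 2 lexer treats as the same token as `=>` in function literals and case clauses, as in the `twice` helper further below.
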
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala
index 9891a70c0c..b322a88dbb 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala
@@ -377,7 +377,7 @@ class HttpServerSpec extends AkkaSpec(
})
"proceed to next request once previous request's entity has been drained" in assertAllStagesStopped(new TestSetup {
- def twice(action: => Unit): Unit = { action; action }
+ def twice(action: ⇒ Unit): Unit = { action; action }
twice {
send("""POST / HTTP/1.1
@@ -977,7 +977,7 @@ class HttpServerSpec extends AkkaSpec(
.thrownBy(entity.dataBytes.runFold(ByteString.empty)(_ ++ _).awaitResult(100.millis))
.getCause
error shouldEqual EntityStreamSizeException(limit, Some(actualSize))
- error.getMessage should include ("exceeded content length limit")
+ error.getMessage should include("exceeded content length limit")
responses.expectRequest()
responses.sendError(error.asInstanceOf[Exception])
@@ -1000,7 +1000,7 @@ class HttpServerSpec extends AkkaSpec(
.thrownBy(entity.dataBytes.runFold(ByteString.empty)(_ ++ _).awaitResult(100.millis))
.getCause
error shouldEqual EntityStreamSizeException(limit, None)
- error.getMessage should include ("exceeded content length limit")
+ error.getMessage should include("exceeded content length limit")
responses.expectRequest()
responses.sendError(error.asInstanceOf[Exception])
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala
index bf85d4c6d7..8758a9e2ab 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala
@@ -34,17 +34,16 @@ abstract class HttpServerTestSetupBase {
val netIn = TestPublisher.probe[ByteString]()
val netOut = ByteStringSinkProbe()
- RunnableGraph.fromGraph(GraphDSL.create(HttpServerBluePrint(settings, remoteAddress = remoteAddress, log = NoLogging)) { implicit b ⇒
- server ⇒
- import GraphDSL.Implicits._
- Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> server.in2
- server.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x }.buffer(1, OverflowStrategy.backpressure) ~> netOut.sink
- server.out2 ~> Sink.fromSubscriber(requests)
- Source.fromPublisher(responses) ~> server.in1
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(HttpServerBluePrint(settings, remoteAddress = remoteAddress, log = NoLogging)) { implicit b ⇒ server ⇒
+ import GraphDSL.Implicits._
+ Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> server.in2
+ server.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x }.buffer(1, OverflowStrategy.backpressure) ~> netOut.sink
+ server.out2 ~> Sink.fromSubscriber(requests)
+ Source.fromPublisher(responses) ~> server.in1
+ ClosedShape
}).run()
- netIn -> netOut
+ netIn → netOut
}
def expectResponseWithWipedDate(expected: String): Unit = {
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala
index b3475a5cbf..2b87ddfc18 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala
@@ -23,7 +23,7 @@ class MessageSpec extends FreeSpec with Matchers with WithMaterializerSpec {
val InvalidUtf8TwoByteSequence: ByteString = ByteString(
(128 + 64).toByte, // start two byte sequence
0 // but don't finish it
- )
+ )
"The WebSocket implementation should" - {
"collect messages from frames" - {
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala
index 5f68cafeec..d249b73758 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala
@@ -90,7 +90,7 @@ object WSClientAutobahnTest extends App {
val res =
getCaseCount().flatMap { count ⇒
println(s"Retrieving case info for $count cases...")
- Future.traverse(1 to count)(getCaseInfo).map(_.map(e ⇒ e.caseInfo.id -> e).toMap)
+ Future.traverse(1 to count)(getCaseInfo).map(_.map(e ⇒ e.caseInfo.id → e).toMap)
}
res.foreach { res ⇒
println(s"Received info for ${res.size} cases")
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSServerAutobahnTest.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSServerAutobahnTest.scala
index 229623fec1..cf5bf0196b 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSServerAutobahnTest.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSServerAutobahnTest.scala
@@ -27,14 +27,15 @@ object WSServerAutobahnTest extends App {
val mode = props.getOrElse("akka.ws-mode", "read") // read or sleep
try {
- val binding = Http().bindAndHandleSync({
- case req @ HttpRequest(GET, Uri.Path("/"), _, _, _) if req.header[UpgradeToWebSocket].isDefined ⇒
- req.header[UpgradeToWebSocket] match {
- case Some(upgrade) ⇒ upgrade.handleMessages(echoWebSocketService) // needed for running the autobahn test suite
- case None ⇒ HttpResponse(400, entity = "Not a valid websocket request!")
- }
- case _: HttpRequest ⇒ HttpResponse(404, entity = "Unknown resource!")
- },
+ val binding = Http().bindAndHandleSync(
+ {
+ case req @ HttpRequest(GET, Uri.Path("/"), _, _, _) if req.header[UpgradeToWebSocket].isDefined ⇒
+ req.header[UpgradeToWebSocket] match {
+ case Some(upgrade) ⇒ upgrade.handleMessages(echoWebSocketService) // needed for running the autobahn test suite
+ case None ⇒ HttpResponse(400, entity = "Not a valid websocket request!")
+ }
+ case _: HttpRequest ⇒ HttpResponse(404, entity = "Unknown resource!")
+ },
interface = host, // adapt to your docker host IP address if necessary
port = port)
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala
index 72a27b31e5..9626741bbe 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala
@@ -16,13 +16,14 @@ trait WSTestSetupBase extends Matchers {
def expectBytes(length: Int): ByteString
def expectBytes(bytes: ByteString): Unit
- def sendWSFrame(opcode: Opcode,
- data: ByteString,
- fin: Boolean,
- mask: Boolean = false,
- rsv1: Boolean = false,
- rsv2: Boolean = false,
- rsv3: Boolean = false): Unit = {
+ def sendWSFrame(
+ opcode: Opcode,
+ data: ByteString,
+ fin: Boolean,
+ mask: Boolean = false,
+ rsv1: Boolean = false,
+ rsv2: Boolean = false,
+ rsv3: Boolean = false): Unit = {
val (theMask, theData) =
if (mask) {
val m = Random.nextInt()
@@ -34,13 +35,14 @@ trait WSTestSetupBase extends Matchers {
def sendWSCloseFrame(closeCode: Int, mask: Boolean = false): Unit =
send(closeFrame(closeCode, mask))
- def expectWSFrame(opcode: Opcode,
- data: ByteString,
- fin: Boolean,
- mask: Option[Int] = None,
- rsv1: Boolean = false,
- rsv2: Boolean = false,
- rsv3: Boolean = false): Unit =
+ def expectWSFrame(
+ opcode: Opcode,
+ data: ByteString,
+ fin: Boolean,
+ mask: Option[Int] = None,
+ rsv1: Boolean = false,
+ rsv2: Boolean = false,
+ rsv3: Boolean = false): Unit =
expectBytes(frameHeader(opcode, data.length, fin, mask, rsv1, rsv2, rsv3) ++ data)
def expectWSCloseFrame(closeCode: Int, mask: Boolean = false): Unit =
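
The two signature rewrites above extend the same rule to definition sites: a wrapped parameter list gets a lone opening parenthesis and one parameter per line at a uniform indent, instead of aligning the tail parameters under the first. Reduced to a toy signature (hypothetical body, echoing `frameHeader` in the next file):

    object Before { // tail parameters aligned under the first, at an ad hoc column
      def frameHeader(opcode: Int,
                      length: Long,
                      fin: Boolean): Vector[Byte] = ???
    }

    object After { // lone open paren, one parameter per line, uniform indent
      def frameHeader(
        opcode: Int,
        length: Long,
        fin: Boolean): Vector[Byte] = ???
    }
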
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala
index f608bf5ff6..3768c13755 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala
@@ -13,11 +13,11 @@ object WSTestUtils {
def frameHeader(
opcode: Opcode,
length: Long,
- fin: Boolean,
- mask: Option[Int] = None,
- rsv1: Boolean = false,
- rsv2: Boolean = false,
- rsv3: Boolean = false): ByteString = {
+ fin: Boolean,
+ mask: Option[Int] = None,
+ rsv1: Boolean = false,
+ rsv2: Boolean = false,
+ rsv3: Boolean = false): ByteString = {
def set(should: Boolean, mask: Int): Int =
if (should) mask else 0
diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala
index 2c0c4ed10e..8b9d1107e8 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala
@@ -312,13 +312,12 @@ class WebSocketClientSpec extends FreeSpec with Matchers with WithMaterializerSp
val netIn = TestPublisher.probe[ByteString]()
val graph =
- RunnableGraph.fromGraph(GraphDSL.create(clientLayer) { implicit b ⇒
- client ⇒
- import GraphDSL.Implicits._
- Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2
- client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> netOut.sink
- client.out2 ~> clientImplementation ~> client.in1
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(clientLayer) { implicit b ⇒ client ⇒
+ import GraphDSL.Implicits._
+ Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2
+ client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> netOut.sink
+ client.out2 ~> clientImplementation ~> client.in1
+ ClosedShape
})
val response = graph.run()
diff --git a/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala
index 51e213a5de..f7e02d5534 100644
--- a/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala
@@ -37,11 +37,12 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
"Accept: application/vnd.spray" =!=
Accept(`application/vnd.spray`)
"Accept: */*, text/*; foo=bar, custom/custom; bar=\"b>az\"" =!=
- Accept(`*/*`,
- MediaRange.custom("text", Map("foo" -> "bar")),
- MediaType.customBinary("custom", "custom", MediaType.Compressible, params = Map("bar" -> "b>az")))
+ Accept(
+ `*/*`,
+ MediaRange.custom("text", Map("foo" → "bar")),
+ MediaType.customBinary("custom", "custom", MediaType.Compressible, params = Map("bar" → "b>az")))
"Accept: application/*+xml; version=2" =!=
- Accept(MediaType.customBinary("application", "*+xml", MediaType.Compressible, params = Map("version" -> "2")))
+ Accept(MediaType.customBinary("application", "*+xml", MediaType.Compressible, params = Map("version" → "2")))
}
"Accept-Charset" in {
@@ -135,20 +136,21 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
Authorization(BasicHttpCredentials("Aladdin", "open sesame")).renderedTo(
"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
"""Authorization: Fancy yes="n:o", nonce=42""" =!=
- Authorization(GenericHttpCredentials("Fancy", Map("yes" -> "n:o", "nonce" -> "42"))).renderedTo(
+ Authorization(GenericHttpCredentials("Fancy", Map("yes" → "n:o", "nonce" → "42"))).renderedTo(
"""Fancy yes="n:o",nonce=42""")
"""Authorization: Fancy yes=no,nonce="4\\2"""" =!=
- Authorization(GenericHttpCredentials("Fancy", Map("yes" -> "no", "nonce" -> """4\2""")))
+ Authorization(GenericHttpCredentials("Fancy", Map("yes" → "no", "nonce" → """4\2""")))
"Authorization: Basic Qm9iOg==" =!=
Authorization(BasicHttpCredentials("Bob", ""))
"""Authorization: Digest name=Bob""" =!=
- Authorization(GenericHttpCredentials("Digest", Map("name" -> "Bob")))
+ Authorization(GenericHttpCredentials("Digest", Map("name" → "Bob")))
"""Authorization: Bearer mF_9.B5f-4.1JqM/""" =!=
Authorization(OAuth2BearerToken("mF_9.B5f-4.1JqM/"))
"Authorization: NoParamScheme" =!=
Authorization(GenericHttpCredentials("NoParamScheme", Map.empty[String, String]))
"Authorization: QVFJQzV3TTJMWTRTZmN3Zk=" =!=
- ErrorInfo("Illegal HTTP header 'Authorization': Invalid input '=', expected auth-param, OWS, token68, 'EOI' or tchar (line 1, column 23)",
+ ErrorInfo(
+ "Illegal HTTP header 'Authorization': Invalid input '=', expected auth-param, OWS, token68, 'EOI' or tchar (line 1, column 23)",
"""QVFJQzV3TTJMWTRTZmN3Zk=
| ^""".stripMarginWithNewline("\n"))
}
@@ -179,7 +181,7 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
"Content-Disposition" in {
"Content-Disposition: form-data" =!= `Content-Disposition`(ContentDispositionTypes.`form-data`)
"Content-Disposition: attachment; name=field1; filename=\"file/txt\"" =!=
- `Content-Disposition`(ContentDispositionTypes.attachment, Map("name" -> "field1", "filename" -> "file/txt"))
+ `Content-Disposition`(ContentDispositionTypes.attachment, Map("name" → "field1", "filename" → "file/txt"))
}
"Content-Encoding" in {
@@ -201,7 +203,7 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
"Content-Type: text/plain; charset=utf8" =!=
`Content-Type`(ContentType(`text/plain`, `UTF-8`)).renderedTo("text/plain; charset=UTF-8")
"Content-Type: text/xml2; version=3; charset=windows-1252" =!=
- `Content-Type`(MediaType.customWithOpenCharset("text", "xml2", params = Map("version" -> "3"))
+ `Content-Type`(MediaType.customWithOpenCharset("text", "xml2", params = Map("version" → "3"))
withCharset HttpCharsets.getForKey("windows-1252").get)
"Content-Type: text/plain; charset=fancy-pants" =!=
`Content-Type`(`text/plain` withCharset HttpCharset.custom("fancy-pants"))
@@ -224,17 +226,17 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
}
"Cookie (RFC 6265)" in {
- "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" -> "31d4d96e407aad42")
- "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= Cookie("SID" -> "31d4d96e407aad42", "lang" -> "en>US")
- "Cookie: a=1; b=2" =!= Cookie("a" -> "1", "b" -> "2")
- "Cookie: a=1;b=2" =!= Cookie("a" -> "1", "b" -> "2").renderedTo("a=1; b=2")
- "Cookie: a=1 ;b=2" =!= Cookie("a" -> "1", "b" -> "2").renderedTo("a=1; b=2")
+ "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" → "31d4d96e407aad42")
+ "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= Cookie("SID" → "31d4d96e407aad42", "lang" → "en>US")
+ "Cookie: a=1; b=2" =!= Cookie("a" → "1", "b" → "2")
+ "Cookie: a=1;b=2" =!= Cookie("a" → "1", "b" → "2").renderedTo("a=1; b=2")
+ "Cookie: a=1 ;b=2" =!= Cookie("a" → "1", "b" → "2").renderedTo("a=1; b=2")
- "Cookie: z=0;a=1,b=2" =!= Cookie("z" -> "0").renderedTo("z=0")
- """Cookie: a=1;b="test"""" =!= Cookie("a" -> "1", "b" -> "test").renderedTo("a=1; b=test")
+ "Cookie: z=0;a=1,b=2" =!= Cookie("z" → "0").renderedTo("z=0")
+ """Cookie: a=1;b="test"""" =!= Cookie("a" → "1", "b" → "test").renderedTo("a=1; b=test")
- "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie("a" -> "1", "c" -> "xyz").renderedTo("a=1; c=xyz")
- "Cookie: a=1; b=ä; c=d" =!= Cookie("a" -> "1", "c" -> "d").renderedTo("a=1; c=d")
+ "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie("a" → "1", "c" → "xyz").renderedTo("a=1; c=xyz")
+ "Cookie: a=1; b=ä; c=d" =!= Cookie("a" → "1", "c" → "d").renderedTo("a=1; c=d")
"Cookie: a=1,2" =!=
ErrorInfo(
@@ -243,16 +245,16 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
}
"Cookie (Raw)" in {
- "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" -> "31d4d96e407aad42").withCookieParsingMode(CookieParsingMode.Raw)
- "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= Cookie("SID" -> "31d4d96e407aad42", "lang" -> "en>US").withCookieParsingMode(CookieParsingMode.Raw)
- "Cookie: a=1; b=2" =!= Cookie("a" -> "1", "b" -> "2").withCookieParsingMode(CookieParsingMode.Raw)
- "Cookie: a=1;b=2" =!= Cookie("a" -> "1", "b" -> "2").renderedTo("a=1; b=2").withCookieParsingMode(CookieParsingMode.Raw)
- "Cookie: a=1 ;b=2" =!= Cookie(List(HttpCookiePair.raw("a" -> "1 "), HttpCookiePair("b" -> "2"))).renderedTo("a=1 ; b=2").withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" → "31d4d96e407aad42").withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= Cookie("SID" → "31d4d96e407aad42", "lang" → "en>US").withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: a=1; b=2" =!= Cookie("a" → "1", "b" → "2").withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: a=1;b=2" =!= Cookie("a" → "1", "b" → "2").renderedTo("a=1; b=2").withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: a=1 ;b=2" =!= Cookie(List(HttpCookiePair.raw("a" → "1 "), HttpCookiePair("b" → "2"))).renderedTo("a=1 ; b=2").withCookieParsingMode(CookieParsingMode.Raw)
- "Cookie: z=0; a=1,b=2" =!= Cookie(List(HttpCookiePair("z" -> "0"), HttpCookiePair.raw("a" -> "1,b=2"))).withCookieParsingMode(CookieParsingMode.Raw)
- """Cookie: a=1;b="test"""" =!= Cookie(List(HttpCookiePair("a" -> "1"), HttpCookiePair.raw("b" -> "\"test\""))).renderedTo("a=1; b=\"test\"").withCookieParsingMode(CookieParsingMode.Raw)
- "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie(List(HttpCookiePair("a" -> "1"), HttpCookiePair.raw("b" -> "f\"d\"c\""), HttpCookiePair("c" -> "xyz"))).withCookieParsingMode(CookieParsingMode.Raw)
- "Cookie: a=1; b=ä; c=d" =!= Cookie(List(HttpCookiePair("a" -> "1"), HttpCookiePair.raw("b" -> "ä"), HttpCookiePair("c" -> "d"))).withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: z=0; a=1,b=2" =!= Cookie(List(HttpCookiePair("z" → "0"), HttpCookiePair.raw("a" → "1,b=2"))).withCookieParsingMode(CookieParsingMode.Raw)
+ """Cookie: a=1;b="test"""" =!= Cookie(List(HttpCookiePair("a" → "1"), HttpCookiePair.raw("b" → "\"test\""))).renderedTo("a=1; b=\"test\"").withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie(List(HttpCookiePair("a" → "1"), HttpCookiePair.raw("b" → "f\"d\"c\""), HttpCookiePair("c" → "xyz"))).withCookieParsingMode(CookieParsingMode.Raw)
+ "Cookie: a=1; b=ä; c=d" =!= Cookie(List(HttpCookiePair("a" → "1"), HttpCookiePair.raw("b" → "ä"), HttpCookiePair("c" → "d"))).withCookieParsingMode(CookieParsingMode.Raw)
}
"Date" in {
@@ -356,7 +358,8 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
"""Link: >; rel="http://example.net/foo"""" =!= Link(Uri("/"), LinkParams.rel("http://example.net/foo"))
.renderedTo(">; rel=http://example.net/foo")
- """Link: ; rel="start http://example.net/relation/other"""" =!= Link(Uri("http://example.org/"),
+ """Link: ; rel="start http://example.net/relation/other"""" =!= Link(
+ Uri("http://example.org/"),
LinkParams.rel("start http://example.net/relation/other"))
// only one 'rel=' is allowed, http://tools.ietf.org/html/rfc5988#section-5.3 requires any subsequent ones to be skipped
@@ -370,18 +373,19 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
"Proxy-Authenticate" in {
"Proxy-Authenticate: Basic realm=\"WallyWorld\",attr=\"val>ue\", Fancy realm=\"yeah\"" =!=
- `Proxy-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" -> "val>ue")), HttpChallenge("Fancy", "yeah"))
+ `Proxy-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" → "val>ue")), HttpChallenge("Fancy", "yeah"))
}
"Proxy-Authorization" in {
"""Proxy-Authorization: Fancy yes=no,nonce="4\\2"""" =!=
- `Proxy-Authorization`(GenericHttpCredentials("Fancy", Map("yes" -> "no", "nonce" -> """4\2""")))
+ `Proxy-Authorization`(GenericHttpCredentials("Fancy", Map("yes" → "no", "nonce" → """4\2""")))
}
"Referer" in {
"Referer: https://spray.io/secure" =!= Referer(Uri("https://spray.io/secure"))
"Referer: /en-us/default.aspx?foo=bar" =!= Referer(Uri("/en-us/default.aspx?foo=bar"))
- "Referer: https://akka.io/#sec" =!= ErrorInfo("Illegal HTTP header 'Referer': requirement failed",
+ "Referer: https://akka.io/#sec" =!= ErrorInfo(
+ "Illegal HTTP header 'Referer': requirement failed",
"Referer header URI must not contain a fragment")
}
@@ -416,19 +420,19 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
"Sec-WebSocket-Extensions: abc, def" =!=
`Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc"), WebSocketExtension("def")))
"Sec-WebSocket-Extensions: abc; param=2; use_y, def" =!=
- `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" -> "2", "use_y" -> "")), WebSocketExtension("def")))
+ `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" → "2", "use_y" → "")), WebSocketExtension("def")))
"Sec-WebSocket-Extensions: abc; param=\",xyz\", def" =!=
- `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" -> ",xyz")), WebSocketExtension("def")))
+ `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" → ",xyz")), WebSocketExtension("def")))
// real examples from https://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-19
"Sec-WebSocket-Extensions: permessage-deflate" =!=
`Sec-WebSocket-Extensions`(Vector(WebSocketExtension("permessage-deflate")))
"Sec-WebSocket-Extensions: permessage-deflate; client_max_window_bits; server_max_window_bits=10" =!=
- `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("permessage-deflate", Map("client_max_window_bits" -> "", "server_max_window_bits" -> "10"))))
+ `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("permessage-deflate", Map("client_max_window_bits" → "", "server_max_window_bits" → "10"))))
"Sec-WebSocket-Extensions: permessage-deflate; client_max_window_bits; server_max_window_bits=10, permessage-deflate; client_max_window_bits" =!=
`Sec-WebSocket-Extensions`(Vector(
- WebSocketExtension("permessage-deflate", Map("client_max_window_bits" -> "", "server_max_window_bits" -> "10")),
- WebSocketExtension("permessage-deflate", Map("client_max_window_bits" -> ""))))
+ WebSocketExtension("permessage-deflate", Map("client_max_window_bits" → "", "server_max_window_bits" → "10")),
+ WebSocketExtension("permessage-deflate", Map("client_max_window_bits" → ""))))
}
"Sec-WebSocket-Key" in {
"Sec-WebSocket-Key: c2Zxb3JpbmgyMzA5dGpoMDIzOWdlcm5vZ2luCg==" =!= `Sec-WebSocket-Key`("c2Zxb3JpbmgyMzA5dGpoMDIzOWdlcm5vZ2luCg==")
@@ -550,13 +554,14 @@ class HttpHeaderSpec extends FreeSpec with Matchers {
qop="auth,auth-int",
nonce=dcd98b7102dd2f0e8b11d0f600bfb0c093,
opaque=5ccc069c403ebaf9f0171e9517f40e41""".stripMarginWithNewline("\r\n") =!=
- `WWW-Authenticate`(HttpChallenge("Digest", "testrealm@host.com", Map("qop" -> "auth,auth-int",
- "nonce" -> "dcd98b7102dd2f0e8b11d0f600bfb0c093", "opaque" -> "5ccc069c403ebaf9f0171e9517f40e41"))).renderedTo(
+ `WWW-Authenticate`(HttpChallenge("Digest", "testrealm@host.com", Map(
+ "qop" → "auth,auth-int",
+ "nonce" → "dcd98b7102dd2f0e8b11d0f600bfb0c093", "opaque" → "5ccc069c403ebaf9f0171e9517f40e41"))).renderedTo(
"Digest realm=\"testrealm@host.com\",qop=\"auth,auth-int\",nonce=dcd98b7102dd2f0e8b11d0f600bfb0c093,opaque=5ccc069c403ebaf9f0171e9517f40e41")
"WWW-Authenticate: Basic realm=\"WallyWorld\",attr=\"val>ue\", Fancy realm=\"yeah\"" =!=
- `WWW-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" -> "val>ue")), HttpChallenge("Fancy", "yeah"))
+ `WWW-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" → "val>ue")), HttpChallenge("Fancy", "yeah"))
"""WWW-Authenticate: Fancy realm="Secure Area",nonce=42""" =!=
- `WWW-Authenticate`(HttpChallenge("Fancy", "Secure Area", Map("nonce" -> "42")))
+ `WWW-Authenticate`(HttpChallenge("Fancy", "Secure Area", Map("nonce" → "42")))
}
"X-Forwarded-For" in {
diff --git a/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala b/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala
index 609ed7695f..aaafb8ea5b 100644
--- a/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala
@@ -42,16 +42,16 @@ class JavaApiSpec extends FreeSpec with MustMatchers {
}
"access parameterMap" in {
Uri.create("/abc?name=blub&age=28")
- .query().toMap.asScala must contain allOf ("name" -> "blub", "age" -> "28")
+ .query().toMap.asScala must contain allOf ("name" → "blub", "age" → "28")
}
"access parameters" in {
val Seq(param1, param2, param3) =
Uri.create("/abc?name=blub&age=28&name=blub2")
.query().toList.asScala.map(_.toScala)
- param1 must be("name" -> "blub")
- param2 must be("age" -> "28")
- param3 must be("name" -> "blub2")
+ param1 must be("name" → "blub")
+ param2 must be("age" → "28")
+ param3 must be("name" → "blub2")
}
"access single parameter" in {
val query = Uri.create("/abc?name=blub").query()
diff --git a/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiTestCaseSpecs.scala b/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiTestCaseSpecs.scala
index 794049b84e..6cf8e71254 100644
--- a/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiTestCaseSpecs.scala
+++ b/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiTestCaseSpecs.scala
@@ -61,7 +61,8 @@ class JavaApiTestCaseSpecs extends FreeSpec with MustMatchers {
Uri.create("/order").query(JavaApiTestCases.addSessionId(orderId)) must be(Uri.create("/order?orderId=123&session=abcdefghijkl"))
}
"create HttpsContext" in {
- akka.http.javadsl.ConnectionContext.https(SSLContext.getDefault,
+ akka.http.javadsl.ConnectionContext.https(
+ SSLContext.getDefault,
Optional.empty[java.util.Collection[String]],
Optional.empty[java.util.Collection[String]],
Optional.empty[TLSClientAuth],
diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala
index 6758b300ec..264c3fbae0 100644
--- a/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala
@@ -251,7 +251,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit
def runRequest(uri: Uri): Future[(Try[HttpResponse], Int)] = {
val itNeverSends = Chunked.fromData(ContentTypes.`text/plain(UTF-8)`, Source.maybe[ByteString])
- Source.single(HttpRequest(POST, uri, entity = itNeverSends) -> 1)
+ Source.single(HttpRequest(POST, uri, entity = itNeverSends) → 1)
.via(pool)
.runWith(Sink.head)
}
@@ -535,7 +535,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit
connection.remoteAddress.getHostName shouldEqual hostname
connection.remoteAddress.getPort shouldEqual port
- requestPublisherProbe -> responseSubscriberProbe
+ requestPublisherProbe → responseSubscriberProbe
}
def acceptConnection(): (TestSubscriber.ManualProbe[HttpRequest], TestPublisher.ManualProbe[HttpResponse]) = {
@@ -552,7 +552,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit
pub.subscribe(requestSubscriberProbe)
responsePublisherProbe.subscribe(sub)
- requestSubscriberProbe -> responsePublisherProbe
+ requestSubscriberProbe → responsePublisherProbe
}
def openClientSocket() = new Socket(hostname, port)
@@ -568,7 +568,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit
val sb = new java.lang.StringBuilder
val cbuf = new Array[Char](256)
@tailrec def drain(): (String, BufferedReader) = reader.read(cbuf) match {
- case -1 ⇒ sb.toString -> reader
+ case -1 ⇒ sb.toString → reader
case n ⇒ sb.append(cbuf, 0, n); drain()
}
drain()
diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala
index 105e583289..0176913e0d 100644
--- a/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala
+++ b/akka-http-core/src/test/scala/akka/http/scaladsl/TestServer.scala
@@ -56,7 +56,8 @@ object TestServer extends App {
////////////// helpers //////////////
lazy val index = HttpResponse(
- entity = HttpEntity(ContentTypes.`text/html(UTF-8)`,
+ entity = HttpEntity(
+ ContentTypes.`text/html(UTF-8)`,
"""|
|
| Say hello to akka-http-core!
diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala
index 2b35b8e4b4..a37f21fcc7 100644
--- a/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala
@@ -43,7 +43,7 @@ class MultipartSpec extends WordSpec with Matchers with Inside with BeforeAndAft
Multipart.FormData.BodyPart("bar", defaultEntity("BAR")) :: Nil))
val strict = Await.result(streamed.toStrict(1.second), 1.second)
- strict shouldEqual Multipart.FormData(Map("foo" -> HttpEntity("FOO"), "bar" -> HttpEntity("BAR")))
+ strict shouldEqual Multipart.FormData(Map("foo" → HttpEntity("FOO"), "bar" → HttpEntity("BAR")))
}
}
diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala
index b17884d834..bac88cdd02 100644
--- a/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala
+++ b/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala
@@ -148,7 +148,8 @@ class UriSpec extends WordSpec with Matchers {
"not accept illegal IPv6 literals" in {
// 5 char quad
the[IllegalUriException] thrownBy Host("[::12345]") shouldBe {
- IllegalUriException("Illegal URI host: Invalid input '5', expected ':' or ']' (line 1, column 8)",
+ IllegalUriException(
+ "Illegal URI host: Invalid input '5', expected ':' or ']' (line 1, column 8)",
"[::12345]\n" +
" ^")
}
@@ -305,29 +306,29 @@ class UriSpec extends WordSpec with Matchers {
query.getOrElse("d", "x") shouldEqual "x"
query.getAll("b") shouldEqual List("", "4", "2")
query.getAll("d") shouldEqual Nil
- query.toMap shouldEqual Map("a" -> "1", "b" -> "", "c" -> "3")
- query.toMultiMap shouldEqual Map("a" -> List("1"), "b" -> List("", "4", "2"), "c" -> List("3"))
- query.toList shouldEqual List("a" -> "1", "b" -> "2", "c" -> "3", "b" -> "4", "b" -> "")
- query.toSeq shouldEqual Seq("a" -> "1", "b" -> "2", "c" -> "3", "b" -> "4", "b" -> "")
+ query.toMap shouldEqual Map("a" → "1", "b" → "", "c" → "3")
+ query.toMultiMap shouldEqual Map("a" → List("1"), "b" → List("", "4", "2"), "c" → List("3"))
+ query.toList shouldEqual List("a" → "1", "b" → "2", "c" → "3", "b" → "4", "b" → "")
+ query.toSeq shouldEqual Seq("a" → "1", "b" → "2", "c" → "3", "b" → "4", "b" → "")
}
"support conversion from list of name/value pairs" in {
import Query._
- val pairs = List("key1" -> "value1", "key2" -> "value2", "key3" -> "value3")
+ val pairs = List("key1" → "value1", "key2" → "value2", "key3" → "value3")
Query(pairs: _*).toList.diff(pairs) shouldEqual Nil
Query() shouldEqual Empty
- Query("k" -> "v") shouldEqual ("k" -> "v") +: Empty
+ Query("k" → "v") shouldEqual ("k" → "v") +: Empty
}
"encode special separators in query parameter names" in {
- Query("a=b" -> "c").toString() shouldEqual "a%3Db=c"
- Query("a&b" -> "c").toString() shouldEqual "a%26b=c"
- Query("a+b" -> "c").toString() shouldEqual "a%2Bb=c"
- Query("a;b" -> "c").toString() shouldEqual "a%3Bb=c"
+ Query("a=b" → "c").toString() shouldEqual "a%3Db=c"
+ Query("a&b" → "c").toString() shouldEqual "a%26b=c"
+ Query("a+b" → "c").toString() shouldEqual "a%2Bb=c"
+ Query("a;b" → "c").toString() shouldEqual "a%3Bb=c"
}
"encode special separators in query parameter values" in {
- Query("a" -> "b=c").toString() shouldEqual "a=b%3Dc"
- Query("a" -> "b&c").toString() shouldEqual "a=b%26c"
- Query("a" -> "b+c").toString() shouldEqual "a=b%2Bc"
- Query("a" -> "b;c").toString() shouldEqual "a=b%3Bc"
+ Query("a" → "b=c").toString() shouldEqual "a=b%3Dc"
+ Query("a" → "b&c").toString() shouldEqual "a=b%26c"
+ Query("a" → "b+c").toString() shouldEqual "a=b%2Bc"
+ Query("a" → "b;c").toString() shouldEqual "a=b%3Bc"
}
}
@@ -456,7 +457,7 @@ class UriSpec extends WordSpec with Matchers {
"support tunneling a URI through a query param" in {
val uri = Uri("http://aHost/aPath?aParam=aValue#aFragment")
- val q = Query("uri" -> uri.toString)
+ val q = Query("uri" → uri.toString)
val uri2 = Uri(path = Path./, fragment = Some("aFragment")).withQuery(q).toString
uri2 shouldEqual "/?uri=http://ahost/aPath?aParam%3DaValue%23aFragment#aFragment"
Uri(uri2).query() shouldEqual q
@@ -466,42 +467,48 @@ class UriSpec extends WordSpec with Matchers {
"produce proper error messages for illegal URIs" in {
// illegal scheme
the[IllegalUriException] thrownBy Uri("foö:/a") shouldBe {
- IllegalUriException("Illegal URI reference: Invalid input 'ö', expected scheme-char, 'EOI', '#', ':', '?', slashSegments or pchar (line 1, column 3)",
+ IllegalUriException(
+ "Illegal URI reference: Invalid input 'ö', expected scheme-char, 'EOI', '#', ':', '?', slashSegments or pchar (line 1, column 3)",
"foö:/a\n" +
" ^")
}
// illegal userinfo
the[IllegalUriException] thrownBy Uri("http://user:ö@host") shouldBe {
- IllegalUriException("Illegal URI reference: Invalid input 'ö', expected userinfo-char, pct-encoded, '@' or port (line 1, column 13)",
+ IllegalUriException(
+ "Illegal URI reference: Invalid input 'ö', expected userinfo-char, pct-encoded, '@' or port (line 1, column 13)",
"http://user:ö@host\n" +
" ^")
}
// illegal percent-encoding
the[IllegalUriException] thrownBy Uri("http://use%2G@host") shouldBe {
- IllegalUriException("Illegal URI reference: Invalid input 'G', expected HEXDIG (line 1, column 13)",
+ IllegalUriException(
+ "Illegal URI reference: Invalid input 'G', expected HEXDIG (line 1, column 13)",
"http://use%2G@host\n" +
" ^")
}
// illegal path
the[IllegalUriException] thrownBy Uri("http://www.example.com/name with spaces/") shouldBe {
- IllegalUriException("Illegal URI reference: Invalid input ' ', expected '/', 'EOI', '#', '?' or pchar (line 1, column 28)",
+ IllegalUriException(
+ "Illegal URI reference: Invalid input ' ', expected '/', 'EOI', '#', '?' or pchar (line 1, column 28)",
"http://www.example.com/name with spaces/\n" +
" ^")
}
// illegal path with control character
the[IllegalUriException] thrownBy Uri("http:///with\newline") shouldBe {
- IllegalUriException("Illegal URI reference: Invalid input '\\n', expected '/', 'EOI', '#', '?' or pchar (line 1, column 13)",
+ IllegalUriException(
+ "Illegal URI reference: Invalid input '\\n', expected '/', 'EOI', '#', '?' or pchar (line 1, column 13)",
"http:///with\n" +
" ^")
}
// illegal query
the[IllegalUriException] thrownBy Uri("?a=b=c").query() shouldBe {
- IllegalUriException("Illegal query: Invalid input '=', expected '+', query-char, 'EOI', '&' or pct-encoded (line 1, column 4)",
+ IllegalUriException(
+ "Illegal query: Invalid input '=', expected '+', query-char, 'EOI', '&' or pct-encoded (line 1, column 4)",
"a=b=c\n" +
" ^")
}
@@ -596,8 +603,8 @@ class UriSpec extends WordSpec with Matchers {
uri.withUserInfo("someInfo") shouldEqual Uri("http://someInfo@host/path?query#fragment")
explicitDefault.withUserInfo("someInfo") shouldEqual Uri("http://someInfo@host:80/path?query#fragment")
- uri.withQuery(Query("param1" -> "value1")) shouldEqual Uri("http://host/path?param1=value1#fragment")
- uri.withQuery(Query(Map("param1" -> "value1"))) shouldEqual Uri("http://host/path?param1=value1#fragment")
+ uri.withQuery(Query("param1" → "value1")) shouldEqual Uri("http://host/path?param1=value1#fragment")
+ uri.withQuery(Query(Map("param1" → "value1"))) shouldEqual Uri("http://host/path?param1=value1#fragment")
uri.withRawQueryString("param1=value1") shouldEqual Uri("http://host/path?param1=value1#fragment")
uri.withFragment("otherFragment") shouldEqual Uri("http://host/path?query#otherFragment")
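
The encoding assertions above make a point worth restating: `Query` percent-encodes the separator characters `=`, `&`, `+` and `;` whenever they occur inside a parameter name or value, so the rendered string parses back to the same pairs. A quick check grounded directly in the expectations above:

    import akka.http.scaladsl.model.Uri.Query

    val q = Query("a=b" → "c", "a" → "b&c")
    assert(q.toString == "a%3Db=c&a=b%26c")      // separators inside names/values are escaped
    assert(Query(q.toString).toList == q.toList) // so the round trip is lossless
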
diff --git a/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala b/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala
index 06ef6939c5..1312acde54 100644
--- a/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala
+++ b/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala
@@ -79,10 +79,10 @@ class HttpModelIntegrationSpec extends WordSpec with Matchers with BeforeAndAfte
}
val textHeaders: Seq[(String, String)] = entityTextHeaders ++ partialTextHeaders
textHeaders shouldEqual Seq(
- "Content-Type" -> "application/json",
- "Content-Length" -> "5",
- "Host" -> "localhost",
- "Origin" -> "null")
+ "Content-Type" → "application/json",
+ "Content-Length" → "5",
+ "Host" → "localhost",
+ "Origin" → "null")
// Finally convert the body into an Array[Byte].
@@ -98,9 +98,9 @@ class HttpModelIntegrationSpec extends WordSpec with Matchers with BeforeAndAfte
// example simple model of an HTTP response.
val textHeaders: Seq[(String, String)] = Seq(
- "Content-Type" -> "text/plain",
- "Content-Length" -> "3",
- "X-Greeting" -> "Hello")
+ "Content-Type" → "text/plain",
+ "Content-Length" → "3",
+ "X-Greeting" → "Hello")
val byteArrayBody: Array[Byte] = "foo".getBytes
// Now we need to convert this model to Akka HTTP's model. To do that
diff --git a/akka-http-testkit/src/main/scala/akka/http/javadsl/testkit/TestRouteResult.scala b/akka-http-testkit/src/main/scala/akka/http/javadsl/testkit/TestRouteResult.scala
index 56b450032b..aee99e08b9 100644
--- a/akka-http-testkit/src/main/scala/akka/http/javadsl/testkit/TestRouteResult.scala
+++ b/akka-http-testkit/src/main/scala/akka/http/javadsl/testkit/TestRouteResult.scala
@@ -17,7 +17,7 @@ import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.http.scaladsl.model.HttpResponse
import akka.http.impl.util._
import akka.http.impl.util.JavaMapping.Implicits._
-import akka.http.javadsl.server.{Rejection, RoutingJavaMapping, Unmarshaller}
+import akka.http.javadsl.server.{ Rejection, RoutingJavaMapping, Unmarshaller }
import RoutingJavaMapping._
import akka.http.javadsl.model._
@@ -31,17 +31,17 @@ import scala.annotation.varargs
* implementations for the abstract assertion methods.
*/
abstract class TestRouteResult(_result: RouteResult, awaitAtMost: FiniteDuration)(implicit ec: ExecutionContext, materializer: Materializer) {
-
+
private def _response = _result match {
case scaladsl.server.RouteResult.Complete(r) ⇒ r
case scaladsl.server.RouteResult.Rejected(rejections) ⇒ doFail("Expected route to complete, but was instead rejected with " + rejections)
}
-
+
private def _rejections = _result match {
case scaladsl.server.RouteResult.Complete(r) ⇒ doFail("Request was not rejected, response was " + r)
case scaladsl.server.RouteResult.Rejected(ex) ⇒ ex
- }
-
+ }
+
/**
* Returns the strictified entity of the response. It will be strictified on first access.
*/
@@ -56,12 +56,12 @@ abstract class TestRouteResult(_result: RouteResult, awaitAtMost: FiniteDuration
* Returns the response's content-type
*/
def contentType: ContentType = _response.entity.contentType
-
+
/**
* Returns a string representation of the response's content-type
*/
def contentTypeString: String = contentType.toString
-
+
/**
* Returns the media-type of the the response's content-type
*/
@@ -113,7 +113,7 @@ abstract class TestRouteResult(_result: RouteResult, awaitAtMost: FiniteDuration
* Fails the test if the route completes with a response rather than having been rejected.
*/
def rejections: java.util.List[Rejection] = _rejections.map(_.asJava).asJava
-
+
/**
* Expects the route to have been rejected with a single rejection.
* Fails the test if the route completes with a response, or is rejected with 0 or >1 rejections.
@@ -158,7 +158,7 @@ abstract class TestRouteResult(_result: RouteResult, awaitAtMost: FiniteDuration
*/
def assertContentType(expected: ContentType): TestRouteResult =
assertEqualsKind(expected, contentType, "content type")
-
+
/**
* Assert on the response entity to be a UTF8 representation of the given string.
*/
@@ -174,7 +174,7 @@ abstract class TestRouteResult(_result: RouteResult, awaitAtMost: FiniteDuration
/**
* Assert on the response entity to equal the given object after applying an [[akka.http.javadsl.server.Unmarshaller]].
*/
- def assertEntityAs[T <: AnyRef](unmarshaller: Unmarshaller[HttpEntity,T], expected: T): TestRouteResult =
+ def assertEntityAs[T <: AnyRef](unmarshaller: Unmarshaller[HttpEntity, T], expected: T): TestRouteResult =
assertEqualsKind(expected, entity(unmarshaller), "entity")
/**
@@ -201,17 +201,18 @@ abstract class TestRouteResult(_result: RouteResult, awaitAtMost: FiniteDuration
val lowercased = name.toRootLowerCase
val headers = response.headers.filter(_.is(lowercased))
if (headers.isEmpty) fail(s"Expected `$name` header was missing.")
- else assertTrue(headers.exists(_.value == value),
+ else assertTrue(
+ headers.exists(_.value == value),
s"`$name` header was found but had the wrong value. Found headers: ${headers.mkString(", ")}")
this
}
-
+
@varargs def assertRejections(expectedRejections: Rejection*): TestRouteResult = {
if (rejections.asScala == expectedRejections.toSeq) {
this
} else {
- doFail(s"Expected rejections [${expectedRejections.mkString(",")}], but rejected with [${rejections.asScala.mkString(",")}] instead.")
+ doFail(s"Expected rejections [${expectedRejections.mkString(",")}], but rejected with [${rejections.asScala.mkString(",")}] instead.")
}
}
diff --git a/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala b/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala
index 23581b160e..2c71fc3538 100644
--- a/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala
+++ b/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala
@@ -139,12 +139,13 @@ trait RouteTest extends RequestBuilding with WSTestRequestBuilding with RouteTes
type Out = HttpRequest
def apply(request: HttpRequest, f: HttpRequest ⇒ HttpRequest) = f(request)
}
- implicit def injectIntoRoute(implicit timeout: RouteTestTimeout,
- defaultHostInfo: DefaultHostInfo,
- routingSettings: RoutingSettings,
+ implicit def injectIntoRoute(implicit
+ timeout: RouteTestTimeout,
+ defaultHostInfo: DefaultHostInfo,
+ routingSettings: RoutingSettings,
executionContext: ExecutionContext,
- materializer: Materializer,
- routingLog: RoutingLog,
+ materializer: Materializer,
+ routingLog: RoutingLog,
rejectionHandler: RejectionHandler = RejectionHandler.default,
exceptionHandler: ExceptionHandler = null) =
new TildeArrow[RequestContext, Future[RouteResult]] {
diff --git a/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/WSTestRequestBuilding.scala b/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/WSTestRequestBuilding.scala
index bd2e34543d..ea9e0a92ba 100644
--- a/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/WSTestRequestBuilding.scala
+++ b/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/WSTestRequestBuilding.scala
@@ -20,10 +20,11 @@ trait WSTestRequestBuilding { self: RouteTest ⇒
def handleMessages(handlerFlow: Graph[FlowShape[Message, Message], Any], subprotocol: Option[String]): HttpResponse = {
clientSideHandler.join(handlerFlow).run()
- HttpResponse(StatusCodes.SwitchingProtocols,
+ HttpResponse(
+ StatusCodes.SwitchingProtocols,
headers =
- Upgrade(UpgradeProtocol("websocket") :: Nil) ::
- subprotocol.map(p ⇒ `Sec-WebSocket-Protocol`(p :: Nil)).toList)
+ Upgrade(UpgradeProtocol("websocket") :: Nil) ::
+ subprotocol.map(p ⇒ `Sec-WebSocket-Protocol`(p :: Nil)).toList)
}
})
}
diff --git a/akka-http-tests/src/test/scala/akka/http/javadsl/DirectivesConsistencySpec.scala b/akka-http-tests/src/test/scala/akka/http/javadsl/DirectivesConsistencySpec.scala
index 57fc95c57b..c4cbd5b53c 100644
--- a/akka-http-tests/src/test/scala/akka/http/javadsl/DirectivesConsistencySpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/javadsl/DirectivesConsistencySpec.scala
@@ -60,7 +60,7 @@ class DirectivesConsistencySpec extends WordSpec with Matchers {
d ← javaDirectives
if d.isAnnotationPresent(classOf[CorrespondsTo])
annot = d.getAnnotation(classOf[CorrespondsTo])
- } yield d.getName -> annot.value()
+ } yield d.getName → annot.value()
Map(javaToScalaMappings.toList: _*)
}
@@ -82,12 +82,12 @@ class DirectivesConsistencySpec extends WordSpec with Matchers {
}
val allowMissing: Map[Class[_], Set[String]] = Map(
- scalaDirectivesClazz -> Set(
+ scalaDirectivesClazz → Set(
"route", "request",
"completeOK", // solved by raw complete() in Scala
"defaultDirectoryRenderer", "defaultContentTypeResolver" // solved by implicits in Scala
- ),
- javaDirectivesClazz -> Set(
+ ),
+ javaDirectivesClazz → Set(
"as",
"instanceOf",
"pass",
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/FormDataSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/FormDataSpec.scala
index 83fbb746f4..2cec74a209 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/FormDataSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/FormDataSpec.scala
@@ -18,7 +18,7 @@ class FormDataSpec extends AkkaSpec {
implicit val materializer = ActorMaterializer()
import system.dispatcher
- val formData = FormData(Map("surname" -> "Smith", "age" -> "42"))
+ val formData = FormData(Map("surname" → "Smith", "age" → "42"))
"The FormData infrastructure" should {
"properly round-trip the fields of www-urlencoded forms" in {
@@ -27,10 +27,10 @@ class FormDataSpec extends AkkaSpec {
}
"properly marshal www-urlencoded forms containing special chars" in {
- Marshal(FormData(Map("name" -> "Smith&Wesson"))).to[HttpEntity]
+ Marshal(FormData(Map("name" → "Smith&Wesson"))).to[HttpEntity]
.flatMap(Unmarshal(_).to[String]).futureValue shouldEqual "name=Smith%26Wesson"
- Marshal(FormData(Map("name" -> "Smith+Wesson; hopefully!"))).to[HttpEntity]
+ Marshal(FormData(Map("name" → "Smith+Wesson; hopefully!"))).to[HttpEntity]
.flatMap(Unmarshal(_).to[String]).futureValue shouldEqual "name=Smith%2BWesson%3B+hopefully%21"
}
}
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationGivenResponseCodeSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationGivenResponseCodeSpec.scala
index 5348fd769d..22e0c35a12 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationGivenResponseCodeSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationGivenResponseCodeSpec.scala
@@ -15,9 +15,9 @@ class ContentNegotiationGivenResponseCodeSpec extends RoutingSpec {
pathPrefix(Segment) { mode ⇒
complete {
mode match {
- case "200-text" ⇒ OK -> "ok"
- case "201-text" ⇒ Created -> "created"
- case "400-text" ⇒ BadRequest -> "bad-request"
+ case "200-text" ⇒ OK → "ok"
+ case "201-text" ⇒ Created → "created"
+ case "400-text" ⇒ BadRequest → "bad-request"
}
}
}
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationSpec.scala
index 770623d887..95110ec915 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/ContentNegotiationSpec.scala
@@ -107,7 +107,8 @@ class ContentNegotiationSpec extends FreeSpec with Matchers {
}
"Accept-Charset: UTF-8, *;q=0.8, us;q=0.1" test { accept ⇒
- accept(`text/plain` withCharset `US-ASCII`,
+ accept(
+ `text/plain` withCharset `US-ASCII`,
`text/plain` withCharset `ISO-8859-1`) should select(`text/plain` withCharset `ISO-8859-1`)
}
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/MarshallingSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/MarshallingSpec.scala
index 62980fbbdb..4d73ee31a1 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/MarshallingSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/marshalling/MarshallingSpec.scala
@@ -34,7 +34,7 @@ class MarshallingSpec extends FreeSpec with Matchers with BeforeAndAfterAll with
marshal("Ha“llo".toCharArray) shouldEqual HttpEntity("Ha“llo")
}
"FormDataMarshaller should marshal FormData instances to application/x-www-form-urlencoded content" in {
- marshal(FormData(Map("name" -> "Bob", "pass" -> "hällo", "admin" -> ""))) shouldEqual
+ marshal(FormData(Map("name" → "Bob", "pass" → "hällo", "admin" → ""))) shouldEqual
HttpEntity(`application/x-www-form-urlencoded` withCharset `UTF-8`, "name=Bob&pass=h%C3%A4llo&admin=")
}
}
@@ -65,7 +65,7 @@ class MarshallingSpec extends FreeSpec with Matchers with BeforeAndAfterAll with
"one non-empty part" in {
marshal(Multipart.General(`multipart/alternative`, Multipart.General.BodyPart.Strict(
entity = HttpEntity(ContentTypes.`text/plain(UTF-8)`, "test@there.com"),
- headers = `Content-Disposition`(ContentDispositionTypes.`form-data`, Map("name" -> "email")) :: Nil))) shouldEqual
+ headers = `Content-Disposition`(ContentDispositionTypes.`form-data`, Map("name" → "email")) :: Nil))) shouldEqual
HttpEntity(
contentType = `multipart/alternative` withBoundary randomBoundary withCharset `UTF-8`,
string = s"""--$randomBoundary
@@ -76,7 +76,8 @@ class MarshallingSpec extends FreeSpec with Matchers with BeforeAndAfterAll with
|--$randomBoundary--""".stripMarginWithNewline("\r\n"))
}
"two different parts" in {
- marshal(Multipart.General(`multipart/related`,
+ marshal(Multipart.General(
+ `multipart/related`,
Multipart.General.BodyPart.Strict(HttpEntity(`text/plain` withCharset `US-ASCII`, "first part, with a trailing linebreak\r\n")),
Multipart.General.BodyPart.Strict(
HttpEntity(`application/octet-stream`, ByteString("filecontent")),
@@ -100,8 +101,8 @@ class MarshallingSpec extends FreeSpec with Matchers with BeforeAndAfterAll with
"multipartFormDataMarshaller should correctly marshal 'multipart/form-data' content with" - {
"two fields" in {
marshal(Multipart.FormData(ListMap(
- "surname" -> HttpEntity("Mike"),
- "age" -> marshal(42)))) shouldEqual
+ "surname" → HttpEntity("Mike"),
+ "age" → marshal(42)))) shouldEqual
HttpEntity(
contentType = `multipart/form-data` withBoundary randomBoundary withCharset `UTF-8`,
string = s"""--$randomBoundary
@@ -120,9 +121,9 @@ class MarshallingSpec extends FreeSpec with Matchers with BeforeAndAfterAll with
"two fields having a custom `Content-Disposition`" in {
marshal(Multipart.FormData(Source(List(
Multipart.FormData.BodyPart("attachment[0]", HttpEntity(`text/csv` withCharset `UTF-8`, "name,age\r\n\"John Doe\",20\r\n"),
- Map("filename" -> "attachment.csv")),
+ Map("filename" → "attachment.csv")),
Multipart.FormData.BodyPart("attachment[1]", HttpEntity("naice!".getBytes),
- Map("filename" -> "attachment2.csv"), List(RawHeader("Content-Transfer-Encoding", "binary"))))))) shouldEqual
+ Map("filename" → "attachment2.csv"), List(RawHeader("Content-Transfer-Encoding", "binary"))))))) shouldEqual
HttpEntity(
contentType = `multipart/form-data` withBoundary randomBoundary withCharset `UTF-8`,
string = s"""--$randomBoundary
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/DontLeakActorsOnFailingConnectionSpecs.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/DontLeakActorsOnFailingConnectionSpecs.scala
index 790e38b7f8..ceb19eca38 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/DontLeakActorsOnFailingConnectionSpecs.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/DontLeakActorsOnFailingConnectionSpecs.scala
@@ -41,7 +41,7 @@ class DontLeakActorsOnFailingConnectionSpecs extends WordSpecLike with Matchers
val reqsCount = 100
val clientFlow = Http().superPool[Int]()
val (_, _, port) = TestUtils.temporaryServerHostnameAndPort()
- val source = Source(1 to reqsCount).map(i ⇒ HttpRequest(uri = Uri(s"http://127.0.0.1:$port/test/$i")) -> i)
+ val source = Source(1 to reqsCount).map(i ⇒ HttpRequest(uri = Uri(s"http://127.0.0.1:$port/test/$i")) → i)
val countDown = new CountDownLatch(reqsCount)
val sink = Sink.foreach[(Try[HttpResponse], Int)] {
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/TestServer.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/TestServer.scala
index 39d37885c7..ecca3305dc 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/TestServer.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/TestServer.scala
@@ -35,7 +35,8 @@ object TestServer extends App {
val bindingFuture = Http().bindAndHandle({
get {
path("") {
- withRequestTimeout(1.milli, _ ⇒ HttpResponse(StatusCodes.EnhanceYourCalm,
+ withRequestTimeout(1.milli, _ ⇒ HttpResponse(
+ StatusCodes.EnhanceYourCalm,
entity = "Unable to serve response within time limit, please enchance your calm.")) {
Thread.sleep(1000)
complete(index)
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/CookieDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/CookieDirectivesSpec.scala
index 8195b08ab9..3e9c1f8a27 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/CookieDirectivesSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/CookieDirectivesSpec.scala
@@ -15,7 +15,7 @@ class CookieDirectivesSpec extends RoutingSpec {
"The 'cookie' directive" should {
"extract the respectively named cookie" in {
- Get() ~> addHeader(Cookie("fancy" -> "pants")) ~> {
+ Get() ~> addHeader(Cookie("fancy" → "pants")) ~> {
cookie("fancy") { echoComplete }
} ~> check { responseAs[String] shouldEqual "fancy=pants" }
}
@@ -25,7 +25,7 @@ class CookieDirectivesSpec extends RoutingSpec {
} ~> check { rejection shouldEqual MissingCookieRejection("fancy") }
}
"properly pass through inner rejections" in {
- Get() ~> addHeader(Cookie("fancy" -> "pants")) ~> {
+ Get() ~> addHeader(Cookie("fancy" → "pants")) ~> {
cookie("fancy") { c ⇒ reject(ValidationRejection("Dont like " + c.value)) }
} ~> check { rejection shouldEqual ValidationRejection("Dont like pants") }
}
@@ -56,7 +56,7 @@ class CookieDirectivesSpec extends RoutingSpec {
"The 'optionalCookie' directive" should {
"produce a `Some(cookie)` extraction if the cookie is present" in {
- Get() ~> Cookie("abc" -> "123") ~> {
+ Get() ~> Cookie("abc" → "123") ~> {
optionalCookie("abc") { echoComplete }
} ~> check { responseAs[String] shouldEqual "Some(abc=123)" }
}
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FileUploadDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FileUploadDirectivesSpec.scala
index 7851be1391..d0c23a0789 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FileUploadDirectivesSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FileUploadDirectivesSpec.scala
@@ -27,7 +27,7 @@ class FileUploadDirectivesSpec extends RoutingSpec {
Multipart.FormData(Multipart.FormData.BodyPart.Strict(
"fieldName",
HttpEntity(ContentTypes.`text/xml(UTF-8)`, xml),
- Map("filename" -> "age.xml")))
+ Map("filename" → "age.xml")))
@volatile var file: Option[File] = None
@@ -77,7 +77,7 @@ class FileUploadDirectivesSpec extends RoutingSpec {
Multipart.FormData(Multipart.FormData.BodyPart.Strict(
"field1",
HttpEntity(ContentTypes.`text/plain(UTF-8)`, str1),
- Map("filename" -> "data1.txt")))
+ Map("filename" → "data1.txt")))
Post("/", multipartForm) ~> route ~> check {
status shouldEqual StatusCodes.OK
@@ -98,11 +98,11 @@ class FileUploadDirectivesSpec extends RoutingSpec {
Multipart.FormData.BodyPart.Strict(
"field1",
HttpEntity(ContentTypes.`text/plain(UTF-8)`, str1),
- Map("filename" -> "data1.txt")),
+ Map("filename" → "data1.txt")),
Multipart.FormData.BodyPart.Strict(
"field1",
HttpEntity(ContentTypes.`text/plain(UTF-8)`, str2),
- Map("filename" -> "data2.txt")))
+ Map("filename" → "data2.txt")))
Post("/", multipartForm) ~> route ~> check {
status shouldEqual StatusCodes.OK
@@ -135,7 +135,7 @@ class FileUploadDirectivesSpec extends RoutingSpec {
Multipart.FormData(Multipart.FormData.BodyPart.Strict(
"field1",
HttpEntity(ContentTypes.`text/plain(UTF-8)`, str1),
- Map("filename" -> "data1.txt")))
+ Map("filename" → "data1.txt")))
Post("/", multipartForm) ~> route ~> check {
rejection === MissingFormFieldRejection("missing")
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FormFieldDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FormFieldDirectivesSpec.scala
index 2945554e89..f253dabb30 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FormFieldDirectivesSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/FormFieldDirectivesSpec.scala
@@ -19,24 +19,24 @@ class FormFieldDirectivesSpec extends RoutingSpec {
ScalaXmlSupport.nodeSeqUnmarshaller(`text/xml`, `text/html`, `text/plain`)
val nodeSeq: xml.NodeSeq = yes
- val urlEncodedForm = FormData(Map("firstName" -> "Mike", "age" -> "42"))
- val urlEncodedFormWithVip = FormData(Map("firstName" -> "Mike", "age" -> "42", "VIP" -> "true", "super" -> "no"))
+ val urlEncodedForm = FormData(Map("firstName" → "Mike", "age" → "42"))
+ val urlEncodedFormWithVip = FormData(Map("firstName" → "Mike", "age" → "42", "VIP" → "true", "super" → "no"))
val multipartForm = Multipart.FormData {
Map(
- "firstName" -> HttpEntity("Mike"),
- "age" -> HttpEntity(ContentTypes.`text/xml(UTF-8)`, "42"),
- "VIPBoolean" -> HttpEntity("true"))
+ "firstName" → HttpEntity("Mike"),
+ "age" → HttpEntity(ContentTypes.`text/xml(UTF-8)`, "42"),
+ "VIPBoolean" → HttpEntity("true"))
}
val multipartFormWithTextHtml = Multipart.FormData {
Map(
- "firstName" -> HttpEntity("Mike"),
- "age" -> HttpEntity(ContentTypes.`text/xml(UTF-8)`, "42"),
- "VIP" -> HttpEntity(ContentTypes.`text/html(UTF-8)`, "yes"),
- "super" -> HttpEntity("no"))
+ "firstName" → HttpEntity("Mike"),
+ "age" → HttpEntity(ContentTypes.`text/xml(UTF-8)`, "42"),
+ "VIP" → HttpEntity(ContentTypes.`text/html(UTF-8)`, "yes"),
+ "super" → HttpEntity("no"))
}
val multipartFormWithFile = Multipart.FormData(
Multipart.FormData.BodyPart.Strict("file", HttpEntity(ContentTypes.`text/xml(UTF-8)`, "42"),
- Map("filename" -> "age.xml")))
+ Map("filename" → "age.xml")))
"The 'formFields' extraction directive" should {
"properly extract the value of www-urlencoded form fields" in {
@@ -142,22 +142,22 @@ class FormFieldDirectivesSpec extends RoutingSpec {
"The 'formField' repeated directive" should {
"extract an empty Iterable when the parameter is absent" in {
- Post("/", FormData("age" -> "42")) ~> {
+ Post("/", FormData("age" → "42")) ~> {
formField('hobby.*) { echoComplete }
} ~> check { responseAs[String] === "List()" }
}
"extract all occurrences into an Iterable when parameter is present" in {
- Post("/", FormData("age" -> "42", "hobby" -> "cooking", "hobby" -> "reading")) ~> {
+ Post("/", FormData("age" → "42", "hobby" → "cooking", "hobby" → "reading")) ~> {
formField('hobby.*) { echoComplete }
} ~> check { responseAs[String] === "List(cooking, reading)" }
}
"extract as Iterable[Int]" in {
- Post("/", FormData("age" -> "42", "number" -> "3", "number" -> "5")) ~> {
+ Post("/", FormData("age" → "42", "number" → "3", "number" → "5")) ~> {
formField('number.as[Int].*) { echoComplete }
} ~> check { responseAs[String] === "List(3, 5)" }
}
"extract as Iterable[Int] with an explicit deserializer" in {
- Post("/", FormData("age" -> "42", "number" -> "3", "number" -> "A")) ~> {
+ Post("/", FormData("age" → "42", "number" → "3", "number" → "A")) ~> {
formField('number.as(HexInt).*) { echoComplete }
} ~> check { responseAs[String] === "List(3, 10)" }
}
@@ -165,7 +165,7 @@ class FormFieldDirectivesSpec extends RoutingSpec {
"The 'formFieldMap' directive" should {
"extract fields with different keys" in {
- Post("/", FormData("age" -> "42", "numberA" -> "3", "numberB" -> "5")) ~> {
+ Post("/", FormData("age" → "42", "numberA" → "3", "numberB" → "5")) ~> {
formFieldMap { echoComplete }
} ~> check { responseAs[String] shouldEqual "Map(age -> 42, numberA -> 3, numberB -> 5)" }
}
@@ -173,7 +173,7 @@ class FormFieldDirectivesSpec extends RoutingSpec {
"The 'formFieldSeq' directive" should {
"extract all fields" in {
- Post("/", FormData("age" -> "42", "number" -> "3", "number" -> "5")) ~> {
+ Post("/", FormData("age" → "42", "number" → "3", "number" → "5")) ~> {
formFieldSeq { echoComplete }
} ~> check { responseAs[String] shouldEqual "Vector((age,42), (number,3), (number,5))" }
}
@@ -186,7 +186,7 @@ class FormFieldDirectivesSpec extends RoutingSpec {
"The 'formFieldMultiMap' directive" should {
"extract fields with different keys (with duplicates)" in {
- Post("/", FormData("age" -> "42", "number" -> "3", "number" -> "5")) ~> {
+ Post("/", FormData("age" → "42", "number" → "3", "number" → "5")) ~> {
formFieldMultiMap { echoComplete }
} ~> check { responseAs[String] shouldEqual "Map(age -> List(42), number -> List(5, 3))" }
}
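
The 'formFieldMultiMap' expectation shows repeated fields collapsing into one key carrying all values. That grouping behaviour can be sketched with plain collections; the ordering of grouped values is an implementation detail (the spec above sees List(5, 3)):

// Sketch of multimap grouping over repeated form fields.
object MultiMapSketch extends App {
  val fields = List("age" -> "42", "number" -> "3", "number" -> "5")

  val multiMap: Map[String, List[String]] =
    fields.groupBy(_._1).map { case (k, kvs) => k -> kvs.map(_._2) }

  println(multiMap) // e.g. Map(age -> List(42), number -> List(3, 5))
}
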
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/PathDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/PathDirectivesSpec.scala
index 1b3021c951..9a5f199e7a 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/PathDirectivesSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/PathDirectivesSpec.scala
@@ -103,7 +103,7 @@ class PathDirectivesSpec extends RoutingSpec with Inside {
}
"pathPrefix(Map(\"red\" -> 1, \"green\" -> 2, \"blue\" -> 3))" should {
- val test = testFor(pathPrefix(Map("red" -> 1, "green" -> 2, "blue" -> 3)) { echoCaptureAndUnmatchedPath })
+ val test = testFor(pathPrefix(Map("red" → 1, "green" → 2, "blue" → 3)) { echoCaptureAndUnmatchedPath })
"accept [/green]" in test("2:")
"accept [/redsea]" in test("1:sea")
"reject [/black]" in test()
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/RouteDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/RouteDirectivesSpec.scala
index 662b09109d..26e9f65b29 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/RouteDirectivesSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/RouteDirectivesSpec.scala
@@ -78,7 +78,7 @@ class RouteDirectivesSpec extends FreeSpec with GenericRoutingSpec {
case AlreadyRegistered ⇒
import spray.json.DefaultJsonProtocol._
import SprayJsonSupport._
- StatusCodes.BadRequest -> Map("error" -> "User already Registered")
+ StatusCodes.BadRequest → Map("error" → "User already Registered")
}
}
}
@@ -119,7 +119,8 @@ class RouteDirectivesSpec extends FreeSpec with GenericRoutingSpec {
} ~> check {
response shouldEqual HttpResponse(
status = 302,
- entity = HttpEntity(ContentTypes.`text/html(UTF-8)`,
+ entity = HttpEntity(
+ ContentTypes.`text/html(UTF-8)`,
"The requested resource temporarily resides under this URI."),
headers = Location("/foo") :: Nil)
}
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/TimeoutDirectivesSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/TimeoutDirectivesSpec.scala
index 8a324f3bb9..31b89e0f22 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/TimeoutDirectivesSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/server/directives/TimeoutDirectivesSpec.scala
@@ -31,7 +31,8 @@ class TimeoutDirectivesSpec extends IntegrationRoutingSpec {
}
"allow mapping the response" in {
- val timeoutResponse = HttpResponse(StatusCodes.EnhanceYourCalm,
+ val timeoutResponse = HttpResponse(
+ StatusCodes.EnhanceYourCalm,
entity = "Unable to serve response within time limit, please enchance your calm.")
val route =
diff --git a/akka-http-tests/src/test/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallersSpec.scala b/akka-http-tests/src/test/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallersSpec.scala
index 673394be33..e81b302e46 100644
--- a/akka-http-tests/src/test/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallersSpec.scala
+++ b/akka-http-tests/src/test/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallersSpec.scala
@@ -29,13 +29,15 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
"multipartGeneralUnmarshaller should correctly unmarshal 'multipart/*' content with" - {
"an empty part" in {
- Unmarshal(HttpEntity(`multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
"""--XYZABC
|--XYZABC--""".stripMarginWithNewline("\r\n"))).to[Multipart.General] should haveParts(
Multipart.General.BodyPart.Strict(HttpEntity.empty(ContentTypes.`text/plain(UTF-8)`)))
}
"two empty parts" in {
- Unmarshal(HttpEntity(`multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
"""--XYZABC
|--XYZABC
|--XYZABC--""".stripMarginWithNewline("\r\n"))).to[Multipart.General] should haveParts(
@@ -43,7 +45,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
Multipart.General.BodyPart.Strict(HttpEntity.empty(ContentTypes.`text/plain(UTF-8)`)))
}
"a part without entity and missing header separation CRLF" in {
- Unmarshal(HttpEntity(`multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
"""--XYZABC
|Content-type: text/xml
|Age: 12
@@ -51,7 +54,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
Multipart.General.BodyPart.Strict(HttpEntity.empty(ContentTypes.`text/xml(UTF-8)`), List(Age(12))))
}
"an implicitly typed part (without headers) (Strict)" in {
- Unmarshal(HttpEntity(`multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
"""--XYZABC
|
|Perfectly fine part content.
@@ -69,7 +73,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
Multipart.General.BodyPart.Strict(HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Perfectly fine part content.")))
}
"one non-empty form-data part" in {
- Unmarshal(HttpEntity(`multipart/form-data` withBoundary "-" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/form-data` withBoundary "-" withCharset `UTF-8`,
"""---
|Content-type: text/plain; charset=UTF8
|content-disposition: form-data; name="email"
@@ -78,10 +83,11 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
|-----""".stripMarginWithNewline("\r\n"))).to[Multipart.General] should haveParts(
Multipart.General.BodyPart.Strict(
HttpEntity(ContentTypes.`text/plain(UTF-8)`, "test@there.com"),
- List(`Content-Disposition`(ContentDispositionTypes.`form-data`, Map("name" -> "email")))))
+ List(`Content-Disposition`(ContentDispositionTypes.`form-data`, Map("name" → "email")))))
}
"two different parts" in {
- Unmarshal(HttpEntity(`multipart/mixed` withBoundary "12345" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "12345" withCharset `UTF-8`,
"""--12345
|
|first part, with a trailing newline
@@ -93,11 +99,13 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
|filecontent
|--12345--""".stripMarginWithNewline("\r\n"))).to[Multipart.General] should haveParts(
Multipart.General.BodyPart.Strict(HttpEntity(ContentTypes.`text/plain(UTF-8)`, "first part, with a trailing newline\r\n")),
- Multipart.General.BodyPart.Strict(HttpEntity(`application/octet-stream`, ByteString("filecontent")),
+ Multipart.General.BodyPart.Strict(
+ HttpEntity(`application/octet-stream`, ByteString("filecontent")),
List(RawHeader("Content-Transfer-Encoding", "binary"))))
}
"illegal headers" in (
- Unmarshal(HttpEntity(`multipart/form-data` withBoundary "XYZABC" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/form-data` withBoundary "XYZABC" withCharset `UTF-8`,
"""--XYZABC
|Date: unknown
|content-disposition: form-data; name=email
@@ -106,10 +114,12 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
|--XYZABC--""".stripMarginWithNewline("\r\n"))).to[Multipart.General] should haveParts(
Multipart.General.BodyPart.Strict(
HttpEntity(ContentTypes.`text/plain(UTF-8)`, "test@there.com"),
- List(RawHeader("date", "unknown"),
- `Content-Disposition`(ContentDispositionTypes.`form-data`, Map("name" -> "email"))))))
+ List(
+ RawHeader("date", "unknown"),
+ `Content-Disposition`(ContentDispositionTypes.`form-data`, Map("name" → "email"))))))
"a full example (Strict)" in {
- Unmarshal(HttpEntity(`multipart/mixed` withBoundary "12345" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "12345" withCharset `UTF-8`,
"""preamble and
|more preamble
|--12345
@@ -145,7 +155,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
Multipart.General.BodyPart.Strict(HttpEntity(`application/octet-stream`, ByteString("second part, explicitly typed"))))
}
"a boundary with spaces" in {
- Unmarshal(HttpEntity(`multipart/mixed` withBoundary "simple boundary" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "simple boundary" withCharset `UTF-8`,
"""--simple boundary
|--simple boundary--""".stripMarginWithNewline("\r\n"))).to[Multipart.General] should haveParts(
Multipart.General.BodyPart.Strict(HttpEntity.empty(ContentTypes.`text/plain(UTF-8)`)))
@@ -158,13 +169,15 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
.to[Multipart.General].failed, 1.second).getMessage shouldEqual "Unexpected end of multipart entity"
}
"an entity without initial boundary" in {
- Await.result(Unmarshal(HttpEntity(`multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
+ Await.result(Unmarshal(HttpEntity(
+ `multipart/mixed` withBoundary "XYZABC" withCharset `UTF-8`,
"""this is
|just preamble text""".stripMarginWithNewline("\r\n")))
.to[Multipart.General].failed, 1.second).getMessage shouldEqual "Unexpected end of multipart entity"
}
"a stray boundary" in {
- Await.result(Unmarshal(HttpEntity(`multipart/form-data` withBoundary "ABC" withCharset `UTF-8`,
+ Await.result(Unmarshal(HttpEntity(
+ `multipart/form-data` withBoundary "ABC" withCharset `UTF-8`,
"""--ABC
|Content-type: text/plain; charset=UTF8
|--ABCContent-type: application/json
@@ -173,7 +186,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
.to[Multipart.General].failed, 1.second).getMessage shouldEqual "Illegal multipart boundary in message content"
}
"duplicate Content-Type header" in {
- Await.result(Unmarshal(HttpEntity(`multipart/form-data` withBoundary "-" withCharset `UTF-8`,
+ Await.result(Unmarshal(HttpEntity(
+ `multipart/form-data` withBoundary "-" withCharset `UTF-8`,
"""---
|Content-type: text/plain; charset=UTF8
|Content-type: application/json
@@ -185,7 +199,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
"multipart part must not contain more than one Content-Type header"
}
"a missing header-separating CRLF (in Strict entity)" in {
- Await.result(Unmarshal(HttpEntity(`multipart/form-data` withBoundary "-" withCharset `UTF-8`,
+ Await.result(Unmarshal(HttpEntity(
+ `multipart/form-data` withBoundary "-" withCharset `UTF-8`,
"""---
|not good here
|-----""".stripMarginWithNewline("\r\n")))
@@ -207,19 +222,20 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
"a boundary with a trailing space" in {
Await.result(
Unmarshal(HttpEntity(`multipart/mixed` withBoundary "simple boundary " withCharset `UTF-8`, ByteString.empty))
- .to[Multipart.General].failed, 1.second).getMessage shouldEqual
+ .to[Multipart.General].failed, 1.second).getMessage shouldEqual
"requirement failed: 'boundary' parameter of multipart Content-Type must not end with a space char"
}
"a boundary with an illegal character" in {
Await.result(
Unmarshal(HttpEntity(`multipart/mixed` withBoundary "simple&boundary" withCharset `UTF-8`, ByteString.empty))
- .to[Multipart.General].failed, 1.second).getMessage shouldEqual
+ .to[Multipart.General].failed, 1.second).getMessage shouldEqual
"requirement failed: 'boundary' parameter of multipart Content-Type contains illegal character '&'"
}
}
"multipartByteRangesUnmarshaller should correctly unmarshal multipart/byteranges content with two different parts" in {
- Unmarshal(HttpEntity(`multipart/byteranges` withBoundary "12345" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/byteranges` withBoundary "12345" withCharset `UTF-8`,
"""--12345
|Content-Range: bytes 0-2/26
|Content-Type: text/plain
@@ -237,7 +253,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
"multipartFormDataUnmarshaller should correctly unmarshal 'multipart/form-data' content" - {
"with one element and no explicit content-type" in {
- Unmarshal(HttpEntity(`multipart/form-data` withBoundary "XYZABC" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/form-data` withBoundary "XYZABC" withCharset `UTF-8`,
"""--XYZABC
|content-disposition: form-data; name=email
|
@@ -246,7 +263,8 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
Multipart.FormData.BodyPart.Strict("email", HttpEntity(ContentTypes.`text/plain(UTF-8)`, "test@there.com")))
}
"with one element" in {
- Unmarshal(HttpEntity(`multipart/form-data` withBoundary "XYZABC" withCharset `UTF-8`,
+ Unmarshal(HttpEntity(
+ `multipart/form-data` withBoundary "XYZABC" withCharset `UTF-8`,
"""--XYZABC
|content-disposition: form-data; name=email
|Content-Type: application/octet-stream
@@ -284,7 +302,7 @@ class MultipartUnmarshallersSpec extends FreeSpec with Matchers with BeforeAndAf
})
}.to[Multipart.FormData].flatMap(_.toStrict(1.second)) should haveParts(
Multipart.FormData.BodyPart.Strict("email", HttpEntity(`application/octet-stream`, ByteString("test@there.com"))),
- Multipart.FormData.BodyPart.Strict("userfile", HttpEntity(`application/pdf`, ByteString("filecontent")), Map("filename" -> "test€.dat"),
+ Multipart.FormData.BodyPart.Strict("userfile", HttpEntity(`application/pdf`, ByteString("filecontent")), Map("filename" → "test€.dat"),
List(
RawHeader("Content-Transfer-Encoding", "binary"),
RawHeader("Content-Additional-1", "anything"),
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/Marshaller.scala b/akka-http/src/main/scala/akka/http/javadsl/server/Marshaller.scala
index 179f5d05a3..ee53d5890d 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/Marshaller.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/Marshaller.scala
@@ -20,7 +20,7 @@ import akka.http.javadsl.model.HttpResponse
import akka.http.javadsl.model.HttpRequest
import akka.http.javadsl.model.RequestEntity
import akka.util.ByteString
-import akka.http.scaladsl.model.{FormData, HttpCharset}
+import akka.http.scaladsl.model.{ FormData, HttpCharset }
import akka.http.javadsl.model.StatusCode
import akka.http.javadsl.model.HttpHeader
@@ -36,18 +36,18 @@ object Marshaller {
def fromScala[A, B](scalaMarshaller: marshalling.Marshaller[A, B]): Marshaller[A, B] = new Marshaller()(scalaMarshaller)
/**
- * Safe downcasting of the output type of the marshaller to a superclass.
+ * Safe downcasting of the output type of the marshaller to a superclass.
*
- * Marshaller is covariant in B, i.e. if B2 is a subclass of B1,
- * then Marshaller[X,B2] is OK to use where Marshaller[X,B1] is expected.
+ * Marshaller is covariant in B, i.e. if B2 is a subclass of B1,
+ * then Marshaller[X,B2] is OK to use where Marshaller[X,B1] is expected.
*/
def downcast[A, B1, B2 <: B1](m: Marshaller[A, B2]): Marshaller[A, B1] = m.asInstanceOf[Marshaller[A, B1]]
/**
- * Safe downcasting of the output type of the marshaller to a superclass.
+ * Safe downcasting of the output type of the marshaller to a superclass.
*
- * Marshaller is covariant in B, i.e. if B2 is a subclass of B1,
- * then Marshaller[X,B2] is OK to use where Marshaller[X,B1] is expected.
+ * Marshaller is covariant in B, i.e. if B2 is a subclass of B1,
+ * then Marshaller[X,B2] is OK to use where Marshaller[X,B1] is expected.
*/
def downcast[A, B1, B2 <: B1](m: Marshaller[A, B2], target: Class[B1]): Marshaller[A, B1] = m.asInstanceOf[Marshaller[A, B1]]
@@ -64,20 +64,18 @@ object Marshaller {
def byteStringMarshaller(t: ContentType): Marshaller[ByteString, RequestEntity] =
fromScala(scaladsl.marshalling.Marshaller.byteStringMarshaller(t.asScala))
-
// TODO make sure these are actually usable in a sane way
def wrapEntity[A, C](f: function.BiFunction[ExecutionContext, C, A], m: Marshaller[A, RequestEntity], mediaType: MediaType): Marshaller[C, RequestEntity] = {
val scalaMarshaller = m.asScalaToEntityMarshaller
- fromScala(scalaMarshaller.wrapWithEC(mediaType.asScala) { ctx => c: C => f(ctx, c) } (ContentTypeOverrider.forEntity))
+ fromScala(scalaMarshaller.wrapWithEC(mediaType.asScala) { ctx ⇒ c: C ⇒ f(ctx, c) }(ContentTypeOverrider.forEntity))
}
def wrapEntity[A, C, E <: RequestEntity](f: function.Function[C, A], m: Marshaller[A, E], mediaType: MediaType): Marshaller[C, RequestEntity] = {
val scalaMarshaller = m.asScalaToEntityMarshaller
- fromScala(scalaMarshaller.wrap(mediaType.asScala)((in: C) => f.apply(in))(ContentTypeOverrider.forEntity))
+ fromScala(scalaMarshaller.wrap(mediaType.asScala)((in: C) ⇒ f.apply(in))(ContentTypeOverrider.forEntity))
}
-
def entityToOKResponse[A](m: Marshaller[A, _ <: RequestEntity]): Marshaller[A, HttpResponse] = {
fromScala(marshalling.Marshaller.fromToEntityMarshaller[A]()(m.asScalaToEntityMarshaller))
}
@@ -144,9 +142,7 @@ object Marshaller {
* Helper for creating a synchronous [[Marshaller]] to non-negotiable content from the given function.
*/
def opaque[A, B](f: function.Function[A, B]): Marshaller[A, B] =
- fromScala(scaladsl.marshalling.Marshaller.opaque[A, B] { a => f.apply(a) })
-
-
+ fromScala(scaladsl.marshalling.Marshaller.opaque[A, B] { a ⇒ f.apply(a) })
implicit def asScalaToResponseMarshaller[T](m: Marshaller[T, akka.http.javadsl.model.HttpResponse]): ToResponseMarshaller[T] =
m.asScala.map(_.asScala)
@@ -155,7 +151,7 @@ object Marshaller {
m.asScala.map(_.asScala)
}
-class Marshaller[A, B] private(implicit val asScala: marshalling.Marshaller[A, B]) {
+class Marshaller[A, B] private (implicit val asScala: marshalling.Marshaller[A, B]) {
import Marshaller.fromScala
// TODO would be nice to not need this special case
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/RejectionHandler.scala b/akka-http/src/main/scala/akka/http/javadsl/server/RejectionHandler.scala
index b3d3a5df5f..727f9e3529 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/RejectionHandler.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/RejectionHandler.scala
@@ -13,7 +13,7 @@ object RejectionHandler {
* Creates a new [[RejectionHandler]] builder.
*/
def newBuilder = new RejectionHandlerBuilder(server.RejectionHandler.newBuilder)
-
+
def defaultHandler = new RejectionHandler(server.RejectionHandler.default)
}
@@ -22,7 +22,7 @@ final class RejectionHandler(val asScala: server.RejectionHandler) {
* Creates a new [[RejectionHandler]] which uses the given one as fallback for this one.
*/
def withFallback(fallback: RejectionHandler) = new RejectionHandler(asScala.withFallback(fallback.asScala))
-
+
/**
* "Seals" this handler by attaching a default handler as fallback if necessary.
*/
@@ -31,24 +31,24 @@ final class RejectionHandler(val asScala: server.RejectionHandler) {
class RejectionHandlerBuilder(asScala: server.RejectionHandler.Builder) {
def build = new RejectionHandler(asScala.result())
-
+
/**
* Handles a single [[Rejection]] with the given partial function.
*/
def handle[T <: Rejection](t: Class[T], handler: function.Function[T, Route]): RejectionHandlerBuilder = {
- asScala.handle { case r if t.isInstance(r) => handler.apply(t.cast(r)).delegate }
+ asScala.handle { case r if t.isInstance(r) ⇒ handler.apply(t.cast(r)).delegate }
this
}
-
+
/**
* Handles several Rejections of the same type at the same time.
* The list passed to the given function is guaranteed to be non-empty.
*/
def handleAll[T <: Rejection](t: Class[T], handler: function.Function[java.util.List[T], Route]): RejectionHandlerBuilder = {
- asScala.handleAll { rejections:collection.immutable.Seq[T] => handler.apply(rejections.asJava).delegate } (ClassTag(t))
+ asScala.handleAll { rejections: collection.immutable.Seq[T] ⇒ handler.apply(rejections.asJava).delegate }(ClassTag(t))
this
}
-
+
/**
* Handles the special "not found" case using the given [[Route]].
*/
@@ -58,13 +58,13 @@ class RejectionHandlerBuilder(asScala: server.RejectionHandler.Builder) {
}
/**
- * Convenience method for handling rejections created by created by the onCompleteWithBreaker directive.
- * Signals that the request was rejected because the supplied circuit breaker is open and requests are failing fast.
- *
- * Use to customise the error response being written instead of the default [[akka.http.javadsl.model.StatusCodes.SERVICE_UNAVAILABLE]] response.
- */
+ * Convenience method for handling rejections created by the onCompleteWithBreaker directive.
+ * Signals that the request was rejected because the supplied circuit breaker is open and requests are failing fast.
+ *
+ * Use to customise the error response being written instead of the default [[akka.http.javadsl.model.StatusCodes.SERVICE_UNAVAILABLE]] response.
+ */
def handleCircuitBreakerOpenRejection(handler: function.Function[CircuitBreakerOpenRejection, Route]): RejectionHandlerBuilder = {
- asScala.handleCircuitBreakerOpenRejection(t => handler.apply(t).delegate)
+ asScala.handleCircuitBreakerOpenRejection(t ⇒ handler.apply(t).delegate)
this
}
}
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala b/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala
index 6be926f852..a9cbe31da9 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/Rejections.scala
@@ -351,8 +351,9 @@ object Rejections {
def requestEntityExpected = RequestEntityExpectedRejection
- def unacceptedResponseContentType(supportedContentTypes: java.lang.Iterable[ContentType],
- supportedMediaTypes: java.lang.Iterable[MediaType]): UnacceptedResponseContentTypeRejection = {
+ def unacceptedResponseContentType(
+ supportedContentTypes: java.lang.Iterable[ContentType],
+ supportedMediaTypes: java.lang.Iterable[MediaType]): UnacceptedResponseContentTypeRejection = {
val s1: Set[Alternative] = supportedContentTypes.asScala.map(_.asScala).map(ct ⇒ ContentNegotiator.Alternative(ct)).toSet
val s2: Set[Alternative] = supportedMediaTypes.asScala.map(_.asScala).map(mt ⇒ ContentNegotiator.Alternative(mt)).toSet
s.UnacceptedResponseContentTypeRejection(s1 ++ s2)
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/RequestContext.scala b/akka-http/src/main/scala/akka/http/javadsl/server/RequestContext.scala
index 0d60b59447..26c474f11b 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/RequestContext.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/RequestContext.scala
@@ -35,9 +35,9 @@ class RequestContext private (val delegate: scaladsl.server.RequestContext) {
def reconfigure(
executionContext: ExecutionContextExecutor,
- materializer: Materializer,
- log: LoggingAdapter,
- settings: RoutingSettings): RequestContext = wrap(delegate.reconfigure(executionContext, materializer, log, settings.asScala))
+ materializer: Materializer,
+ log: LoggingAdapter,
+ settings: RoutingSettings): RequestContext = wrap(delegate.reconfigure(executionContext, materializer, log, settings.asScala))
def complete[T](value: T, marshaller: Marshaller[T, HttpResponse]): CompletionStage[RouteResult] = {
delegate.complete(ToResponseMarshallable(value)(marshaller))
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/Route.scala b/akka-http/src/main/scala/akka/http/javadsl/server/Route.scala
index 009fc2926c..e16cc0a2f0 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/Route.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/Route.scala
@@ -47,12 +47,13 @@ trait Route {
/**
* "Seals" a route by wrapping it with explicit exception handling and rejection conversion.
*/
- def seal(routingSettings: RoutingSettings,
- parserSettings: ParserSettings,
- rejectionHandler: RejectionHandler,
- exceptionHandler: ExceptionHandler,
- system: ActorSystem,
- materializer: Materializer): Route
+ def seal(
+ routingSettings: RoutingSettings,
+ parserSettings: ParserSettings,
+ rejectionHandler: RejectionHandler,
+ exceptionHandler: ExceptionHandler,
+ system: ActorSystem,
+ materializer: Materializer): Route
def orElse(alternative: Route): Route
}
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/Unmarshaller.scala b/akka-http/src/main/scala/akka/http/javadsl/server/Unmarshaller.scala
index 718b8b7ddf..7d06ea7ff9 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/Unmarshaller.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/Unmarshaller.scala
@@ -41,16 +41,14 @@ object Unmarshaller {
* Creates an unmarshaller from an asynchronous Java function.
*/
def async[A, B](f: java.util.function.Function[A, CompletionStage[B]]): Unmarshaller[A, B] =
- unmarshalling.Unmarshaller[A, B] {
- ctx ⇒ a ⇒ f(a).toScala
+ unmarshalling.Unmarshaller[A, B] { ctx ⇒ a ⇒ f(a).toScala
}
/**
* Creates an unmarshaller from a Java function.
*/
def sync[A, B](f: java.util.function.Function[A, B]): Unmarshaller[A, B] =
- unmarshalling.Unmarshaller[A, B] {
- ctx ⇒ a ⇒ scala.concurrent.Future.successful(f.apply(a))
+ unmarshalling.Unmarshaller[A, B] { ctx ⇒ a ⇒ scala.concurrent.Future.successful(f.apply(a))
}
// format: OFF
@@ -65,14 +63,13 @@ object Unmarshaller {
unmarshalling.Unmarshaller.strict[HttpRequest, RequestEntity](_.entity)
def forMediaType[B](t: MediaType, um: Unmarshaller[HttpEntity, B]): Unmarshaller[HttpEntity, B] = {
- unmarshalling.Unmarshaller.withMaterializer[HttpEntity, B] { implicit ex ⇒
- implicit mat ⇒ jEntity ⇒ {
- val entity = jEntity.asScala
- val mediaType = t.asScala
- if (entity.contentType == ContentTypes.NoContentType || mediaType.matches(entity.contentType.mediaType)) {
- um.asScala(entity)
- } else FastFuture.failed(UnsupportedContentTypeException(ContentTypeRange(t.toRange.asScala)))
- }
+ unmarshalling.Unmarshaller.withMaterializer[HttpEntity, B] { implicit ex ⇒ implicit mat ⇒ jEntity ⇒ {
+ val entity = jEntity.asScala
+ val mediaType = t.asScala
+ if (entity.contentType == ContentTypes.NoContentType || mediaType.matches(entity.contentType.mediaType)) {
+ um.asScala(entity)
+ } else FastFuture.failed(UnsupportedContentTypeException(ContentTypeRange(t.toRange.asScala)))
+ }
}
}
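
The reformatted 'forMediaType' body above gates on content type before delegating: it accepts when the entity carries no content type, or when the expected media type matches the entity's. Simplified to plain strings, the predicate looks roughly like this (the real code compares structured MediaType values):

// Sketch of the media-type gate applied before unmarshalling.
object MediaTypeGateSketch extends App {
  def accepts(expected: String, entityContentType: Option[String]): Boolean =
    entityContentType.forall(ct => ct.takeWhile(_ != ';') == expected)

  println(accepts("text/plain", None))                              // true: no content type set
  println(accepts("text/plain", Some("text/plain; charset=UTF-8"))) // true: media types match
  println(accepts("text/plain", Some("application/json")))         // false: would fail fast
}
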
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/DebuggingDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/DebuggingDirectives.scala
index e8bab166df..32865014e0 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/DebuggingDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/DebuggingDirectives.scala
@@ -66,9 +66,10 @@ abstract class DebuggingDirectives extends CookieDirectives {
* @param showSuccess Function invoked when the route result was successful and yielded an HTTP response
* @param showRejection Function invoked when the route yielded a rejection
*/
- def logResult(showSuccess: JFunction[HttpResponse, LogEntry],
- showRejection: JFunction[JList[Rejection], LogEntry],
- inner: Supplier[Route]) = RouteAdapter {
+ def logResult(
+ showSuccess: JFunction[HttpResponse, LogEntry],
+ showRejection: JFunction[JList[Rejection], LogEntry],
+ inner: Supplier[Route]) = RouteAdapter {
D.logResult(LoggingMagnet.forMessageFromFullShow {
case RouteResult.Complete(response) ⇒ showSuccess.apply(response).asScala
case RouteResult.Rejected(rejections) ⇒ showRejection.apply(rejections.asJava).asScala
@@ -83,9 +84,10 @@ abstract class DebuggingDirectives extends CookieDirectives {
* @param showSuccess Function invoked when the route result was successful and yielded an HTTP response
* @param showRejection Function invoked when the route yielded a rejection
*/
- def logRequestResult(showSuccess: BiFunction[HttpRequest, HttpResponse, LogEntry],
- showRejection: BiFunction[HttpRequest, JList[Rejection], LogEntry],
- inner: Supplier[Route]) = RouteAdapter {
+ def logRequestResult(
+ showSuccess: BiFunction[HttpRequest, HttpResponse, LogEntry],
+ showRejection: BiFunction[HttpRequest, JList[Rejection], LogEntry],
+ inner: Supplier[Route]) = RouteAdapter {
D.logRequestResult(LoggingMagnet.forRequestResponseFromFullShow(request ⇒ {
case RouteResult.Complete(response) ⇒ Some(showSuccess.apply(request, response).asScala)
case RouteResult.Rejected(rejections) ⇒ Some(showRejection.apply(request, rejections.asJava).asScala)
@@ -101,9 +103,10 @@ abstract class DebuggingDirectives extends CookieDirectives {
* @param showRejection Function invoked when the route yielded a rejection
*/
@CorrespondsTo("logRequestResult")
- def logRequestResultOptional(showSuccess: BiFunction[HttpRequest, HttpResponse, Optional[LogEntry]],
- showRejection: BiFunction[HttpRequest, JList[Rejection], Optional[LogEntry]],
- inner: Supplier[Route]) = RouteAdapter {
+ def logRequestResultOptional(
+ showSuccess: BiFunction[HttpRequest, HttpResponse, Optional[LogEntry]],
+ showRejection: BiFunction[HttpRequest, JList[Rejection], Optional[LogEntry]],
+ inner: Supplier[Route]) = RouteAdapter {
D.logRequestResult(LoggingMagnet.forRequestResponseFromFullShow(request ⇒ {
case RouteResult.Complete(response) ⇒ showSuccess.apply(request, response).asScala
case RouteResult.Rejected(rejections) ⇒ showRejection.apply(request, rejections.asJava).asScala
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala
index c650e700b1..ebc94285bb 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/HeaderDirectives.scala
@@ -4,35 +4,35 @@
package akka.http.javadsl.server.directives
import java.util.Optional
-import java.util.{function => jf}
+import java.util.{ function ⇒ jf }
import akka.actor.ReflectiveDynamicAccess
import scala.compat.java8.OptionConverters
import scala.compat.java8.OptionConverters._
import akka.http.impl.util.JavaMapping.Implicits._
-import akka.http.javadsl.model.{HttpHeader, StatusCodes}
+import akka.http.javadsl.model.{ HttpHeader, StatusCodes }
import akka.http.javadsl.model.headers.HttpOriginRange
-import akka.http.javadsl.server.{InvalidOriginRejection, MissingHeaderRejection, Route}
-import akka.http.scaladsl.model.headers.{ModeledCustomHeader, ModeledCustomHeaderCompanion, Origin}
-import akka.http.scaladsl.server.directives.{HeaderMagnet, BasicDirectives => B, HeaderDirectives => D}
+import akka.http.javadsl.server.{ InvalidOriginRejection, MissingHeaderRejection, Route }
+import akka.http.scaladsl.model.headers.{ ModeledCustomHeader, ModeledCustomHeaderCompanion, Origin }
+import akka.http.scaladsl.server.directives.{ HeaderMagnet, BasicDirectives ⇒ B, HeaderDirectives ⇒ D }
import akka.stream.ActorMaterializer
import scala.reflect.ClassTag
-import scala.util.{Failure, Success}
+import scala.util.{ Failure, Success }
abstract class HeaderDirectives extends FutureDirectives {
private type ScalaHeaderMagnet = HeaderMagnet[akka.http.scaladsl.model.HttpHeader]
/**
- * Checks that request comes from the same origin. Extracts the [[Origin]] header value and verifies that
- * allowed range contains the obtained value. In the case of absent of the [[Origin]] header rejects
- * with [[MissingHeaderRejection]]. If the origin value is not in the allowed range
- * rejects with an [[InvalidOriginRejection]] and [[StatusCodes.FORBIDDEN]] status.
- *
- * @group header
- */
+ * Checks that the request comes from the same origin: extracts the [[Origin]] header value and verifies
+ * that the allowed range contains it. If the [[Origin]] header is absent, the request is rejected with a
+ * [[MissingHeaderRejection]]; if the origin value is not within the allowed range, it is rejected with an
+ * [[InvalidOriginRejection]] and [[StatusCodes.FORBIDDEN]] status.
+ *
+ * @group header
+ */
def checkSameOrigin(allowed: HttpOriginRange, inner: jf.Supplier[Route]): Route = RouteAdapter {
D.checkSameOrigin(allowed.asScala) { inner.get().delegate }
}
@@ -43,27 +43,27 @@ abstract class HeaderDirectives extends FutureDirectives {
* with a [[akka.http.javadsl.server.MalformedHeaderRejection]].
*/
def headerValue[T](f: jf.Function[HttpHeader, Optional[T]], inner: jf.Function[T, Route]) = RouteAdapter {
- D.headerValue(h => f.apply(h).asScala) { value =>
+ D.headerValue(h ⇒ f.apply(h).asScala) { value ⇒
inner.apply(value).delegate
}
}
-
+
/**
* Extracts an HTTP header value using the given partial function. If the function is undefined for all headers the
* request is rejected with an empty rejection set.
*/
def headerValuePF[T](pf: PartialFunction[HttpHeader, T], inner: jf.Function[T, Route]) = RouteAdapter {
- D.headerValuePF(pf) { value =>
+ D.headerValuePF(pf) { value ⇒
inner.apply(value).delegate
}
}
-
+
/**
* Extracts the value of the first HTTP request header with the given name.
* If no header with a matching name is found the request is rejected with a [[akka.http.javadsl.server.MissingHeaderRejection]].
*/
def headerValueByName(headerName: String, inner: jf.Function[String, Route]) = RouteAdapter {
- D.headerValueByName(headerName) { value =>
+ D.headerValueByName(headerName) { value ⇒
inner.apply(value).delegate
}
}
@@ -78,15 +78,15 @@ abstract class HeaderDirectives extends FutureDirectives {
// figure out the modeled header companion and use that to parse the header
val refl = new ReflectiveDynamicAccess(getClass.getClassLoader)
refl.getObjectFor[ModeledCustomHeaderCompanion[_]](t.getName) match {
- case Success(companion) =>
+ case Success(companion) ⇒
new HeaderMagnet[T] {
override def classTag = ClassTag(t)
override def runtimeClass = t
override def extractPF = {
- case h if h.is(companion.lowercaseName) => companion.apply(h.toString).asInstanceOf[T]
+ case h if h.is(companion.lowercaseName) ⇒ companion.apply(h.toString).asInstanceOf[T]
}
}
- case Failure(ex) => throw new RuntimeException(s"Failed to find or access the ModeledCustomHeaderCompanion for [${t.getName}]", ex)
+ case Failure(ex) ⇒ throw new RuntimeException(s"Failed to find or access the ModeledCustomHeaderCompanion for [${t.getName}]", ex)
}
}
@@ -94,39 +94,39 @@ abstract class HeaderDirectives extends FutureDirectives {
if (classOf[ModeledCustomHeader[_]].isAssignableFrom(t)) magnetForModeledCustomHeader(t)
else HeaderMagnet.fromClassNormalJavaHeader(t)
- D.headerValueByType(magnet) { value =>
+ D.headerValueByType(magnet) { value ⇒
inner.apply(value).delegate
}
}
-
+
/**
* Extracts an optional HTTP header value using the given function.
* If the given function throws an exception the request is rejected
* with a [[akka.http.javadsl.server.MalformedHeaderRejection]].
*/
def optionalHeaderValue[T](f: jf.Function[HttpHeader, Optional[T]], inner: jf.Function[Optional[T], Route]) = RouteAdapter {
- D.optionalHeaderValue(h => f.apply(h).asScala) { value =>
+ D.optionalHeaderValue(h ⇒ f.apply(h).asScala) { value ⇒
inner.apply(value.asJava).delegate
}
}
-
+
/**
* Extracts an optional HTTP header value using the given partial function.
* If the given function throws an exception the request is rejected
* with a [[akka.http.javadsl.server.MalformedHeaderRejection]].
*/
def optionalHeaderValuePF[T](pf: PartialFunction[HttpHeader, T], inner: jf.Function[Optional[T], Route]) = RouteAdapter {
- D.optionalHeaderValuePF(pf) { value =>
+ D.optionalHeaderValuePF(pf) { value ⇒
inner.apply(value.asJava).delegate
}
}
-
+
/**
* Extracts the value of the optional HTTP request header with the given name.
*/
def optionalHeaderValueByName(headerName: String, inner: jf.Function[Optional[String], Route]) = RouteAdapter {
- D.optionalHeaderValueByName(headerName) { value =>
+ D.optionalHeaderValueByName(headerName) { value ⇒
inner.apply(value.asJava).delegate
}
}
@@ -139,10 +139,10 @@ abstract class HeaderDirectives extends FutureDirectives {
def optionalHeaderValueByType[T <: HttpHeader](t: Class[T], inner: jf.Function[Optional[T], Route]) = RouteAdapter {
// TODO custom headers don't work yet
// TODO needs instance of check if it's a modeled header and then magically locate companion
- D.optionalHeaderValueByType(HeaderMagnet.fromClassNormalJavaHeader(t).asInstanceOf[ScalaHeaderMagnet]) { value =>
+ D.optionalHeaderValueByType(HeaderMagnet.fromClassNormalJavaHeader(t).asInstanceOf[ScalaHeaderMagnet]) { value ⇒
val valueT = value.asInstanceOf[Option[T]] // we know this is safe because T <: HttpHeader
inner.apply(OptionConverters.toJava[T](valueT)).delegate
}
}
-
+
}
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/MarshallingDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/MarshallingDirectives.scala
index b358173db0..8bad98b2d3 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/MarshallingDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/MarshallingDirectives.scala
@@ -16,8 +16,9 @@ abstract class MarshallingDirectives extends HostDirectives {
* If there is a problem with unmarshalling the request is rejected with the [[akka.http.javadsl.server.Rejection]]
* produced by the unmarshaller.
*/
- def request[T](unmarshaller: Unmarshaller[_ >: HttpRequest, T],
- inner: java.util.function.Function[T, Route]): Route = RouteAdapter {
+ def request[T](
+ unmarshaller: Unmarshaller[_ >: HttpRequest, T],
+ inner: java.util.function.Function[T, Route]): Route = RouteAdapter {
D.entity(unmarshaller.asScala) { value ⇒
inner.apply(value).delegate
}
@@ -28,8 +29,9 @@ abstract class MarshallingDirectives extends HostDirectives {
* If there is a problem with unmarshalling the request is rejected with the [[akka.http.javadsl.server.Rejection]]
* produced by the unmarshaller.
*/
- def entity[T](unmarshaller: Unmarshaller[_ >: HttpEntity, T],
- inner: java.util.function.Function[T, Route]): Route = RouteAdapter {
+ def entity[T](
+ unmarshaller: Unmarshaller[_ >: HttpEntity, T],
+ inner: java.util.function.Function[T, Route]): Route = RouteAdapter {
D.entity(Unmarshaller.requestToEntity.flatMap(unmarshaller).asScala) { value ⇒
inner.apply(value).delegate
}
diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/SecurityDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/SecurityDirectives.scala
index 1500da97e5..f25d08aa42 100644
--- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/SecurityDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/SecurityDirectives.scala
@@ -5,16 +5,16 @@ package akka.http.javadsl.server.directives
import java.util.Optional
import java.util.concurrent.CompletionStage
-import java.util.function.{Function => JFunction}
+import java.util.function.{ Function ⇒ JFunction }
import java.util.function.Supplier
import scala.compat.java8.FutureConverters._
import scala.compat.java8.OptionConverters._
import akka.http.javadsl.model.headers.HttpChallenge
import akka.http.javadsl.model.headers.HttpCredentials
-import akka.http.javadsl.server.{RequestContext, Route}
+import akka.http.javadsl.server.{ RequestContext, Route }
import akka.http.scaladsl
-import akka.http.scaladsl.server.{AuthorizationFailedRejection, Directives => D}
+import akka.http.scaladsl.server.{ AuthorizationFailedRejection, Directives ⇒ D }
object SecurityDirectives {
/**
@@ -25,19 +25,19 @@ object SecurityDirectives {
* The username or token provided with the credentials
*/
def identifier: String = asScala.identifier
-
+
/**
* Safely compares the passed in `secret` with the received secret part of the Credentials.
* Use of this method instead of manual String equality testing is recommended in order to guard against timing attacks.
*
* See also [[akka.http.impl.util.EnhancedString#secure_==]], for more information.
*/
- def verify(secret: String): Boolean = asScala.verify(secret)
+ def verify(secret: String): Boolean = asScala.verify(secret)
}
-
+
private def toJava(cred: scaladsl.server.directives.Credentials): Optional[ProvidedCredentials] = cred match {
- case provided: scaladsl.server.directives.Credentials.Provided => Optional.of(ProvidedCredentials(provided))
- case _ => Optional.empty()
+ case provided: scaladsl.server.directives.Credentials.Provided ⇒ Optional.of(ProvidedCredentials(provided))
+ case _ ⇒ Optional.empty()
}
}
@@ -49,167 +49,169 @@ abstract class SecurityDirectives extends SchemeDirectives {
* Extracts the potentially present [[HttpCredentials]] provided with the request's [[akka.http.javadsl.model.headers.Authorization]] header.
*/
def extractCredentials(inner: JFunction[Optional[HttpCredentials], Route]): Route = RouteAdapter {
- D.extractCredentials { cred =>
+ D.extractCredentials { cred ⇒
inner.apply(cred.map(_.asJava).asJava).delegate // TODO attempt to not need map()
}
}
-
+
/**
* Wraps the inner route with Http Basic authentication support using a given `Authenticator[T]`.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is required in this variant, i.e. the request is rejected if [authenticator] returns Optional.empty.
*/
- def authenticateBasic[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
+ def authenticateBasic[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
inner: JFunction[T, Route]): Route = RouteAdapter {
- D.authenticateBasic(realm, c => authenticator.apply(toJava(c)).asScala) { t =>
+ D.authenticateBasic(realm, c ⇒ authenticator.apply(toJava(c)).asScala) { t ⇒
inner.apply(t).delegate
}
}
-
+
/**
* Wraps the inner route with Http Basic authentication support using a given `Authenticator[T]`.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is optional in this variant.
*/
@CorrespondsTo("authenticateBasic")
- def authenticateBasicOptional[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
+ def authenticateBasicOptional[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
inner: JFunction[Optional[T], Route]): Route = RouteAdapter {
- D.authenticateBasic(realm, c => authenticator.apply(toJava(c)).asScala).optional { t =>
+ D.authenticateBasic(realm, c ⇒ authenticator.apply(toJava(c)).asScala).optional { t ⇒
inner.apply(t.asJava).delegate
}
}
-
+
/**
* Wraps the inner route with Http Basic authentication support.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is required in this variant, i.e. the request is rejected if [authenticator] returns Optional.empty.
*/
- def authenticateBasicAsync[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], CompletionStage[Optional[T]]],
+ def authenticateBasicAsync[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], CompletionStage[Optional[T]]],
inner: JFunction[T, Route]): Route = RouteAdapter {
- D.extractExecutionContext { implicit ctx =>
- D.authenticateBasicAsync(realm, c => authenticator.apply(toJava(c)).toScala.map(_.asScala)) { t =>
+ D.extractExecutionContext { implicit ctx ⇒
+ D.authenticateBasicAsync(realm, c ⇒ authenticator.apply(toJava(c)).toScala.map(_.asScala)) { t ⇒
inner.apply(t).delegate
}
}
}
-
+
/**
* Wraps the inner route with Http Basic authentication support.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is optional in this variant.
*/
@CorrespondsTo("authenticateBasicAsync")
- def authenticateBasicAsyncOptional[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], CompletionStage[Optional[T]]],
+ def authenticateBasicAsyncOptional[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], CompletionStage[Optional[T]]],
inner: JFunction[Optional[T], Route]): Route = RouteAdapter {
- D.extractExecutionContext { implicit ctx =>
- D.authenticateBasicAsync(realm, c => authenticator.apply(toJava(c)).toScala.map(_.asScala)).optional { t =>
+ D.extractExecutionContext { implicit ctx ⇒
+ D.authenticateBasicAsync(realm, c ⇒ authenticator.apply(toJava(c)).toScala.map(_.asScala)).optional { t ⇒
inner.apply(t.asJava).delegate
}
}
}
-
+
/**
* A directive that wraps the inner route with OAuth2 Bearer Token authentication support.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is required in this variant, i.e. the request is rejected if [authenticator] returns Optional.empty.
*/
- def authenticateOAuth2[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
+ def authenticateOAuth2[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
inner: JFunction[T, Route]): Route = RouteAdapter {
- D.authenticateOAuth2(realm, c => authenticator.apply(toJava(c)).asScala) { t =>
+ D.authenticateOAuth2(realm, c ⇒ authenticator.apply(toJava(c)).asScala) { t ⇒
inner.apply(t).delegate
}
}
-
+
/**
* A directive that wraps the inner route with OAuth2 Bearer Token authentication support.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is optional in this variant.
*/
@CorrespondsTo("authenticateOAuth2")
- def authenticateOAuth2Optional[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
+ def authenticateOAuth2Optional[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], Optional[T]],
inner: JFunction[Optional[T], Route]): Route = RouteAdapter {
- D.authenticateOAuth2(realm, c => authenticator.apply(toJava(c)).asScala).optional { t =>
+ D.authenticateOAuth2(realm, c ⇒ authenticator.apply(toJava(c)).asScala).optional { t ⇒
inner.apply(t.asJava).delegate
}
}
-
+
/**
* A directive that wraps the inner route with OAuth2 Bearer Token authentication support.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is required in this variant, i.e. the request is rejected if [authenticator] returns Optional.empty.
*/
- def authenticateOAuth2Async[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], CompletionStage[Optional[T]]],
- inner: JFunction[T, Route]): Route = RouteAdapter {
- D.extractExecutionContext { implicit ctx =>
- D.authenticateOAuth2Async(realm, c => authenticator.apply(toJava(c)).toScala.map(_.asScala)) { t =>
+ def authenticateOAuth2Async[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], CompletionStage[Optional[T]]],
+ inner: JFunction[T, Route]): Route = RouteAdapter {
+ D.extractExecutionContext { implicit ctx ⇒
+ D.authenticateOAuth2Async(realm, c ⇒ authenticator.apply(toJava(c)).toScala.map(_.asScala)) { t ⇒
inner.apply(t).delegate
}
}
}
-
+
/**
* A directive that wraps the inner route with OAuth2 Bearer Token authentication support.
* The given authenticator determines whether the credentials in the request are valid
* and, if so, which user object to supply to the inner route.
- *
+ *
* Authentication is optional in this variant.
*/
@CorrespondsTo("authenticateOAuth2Async")
def authenticateOAuth2AsyncOptional[T](realm: String, authenticator: JFunction[Optional[ProvidedCredentials], CompletionStage[Optional[T]]],
inner: JFunction[Optional[T], Route]): Route = RouteAdapter {
- D.extractExecutionContext { implicit ctx =>
- D.authenticateOAuth2Async(realm, c => authenticator.apply(toJava(c)).toScala.map(_.asScala)).optional { t =>
+ D.extractExecutionContext { implicit ctx ⇒
+ D.authenticateOAuth2Async(realm, c ⇒ authenticator.apply(toJava(c)).toScala.map(_.asScala)).optional { t ⇒
inner.apply(t.asJava).delegate
}
}
}
-
+
/**
* Lifts an authenticator function into a directive. The authenticator function gets passed in credentials from the
* [[akka.http.javadsl.model.headers.Authorization]] header of the request. If the function returns `Right(user)` the user object is provided
* to the inner route. If the function returns `Left(challenge)` the request is rejected with an
* [[akka.http.javadsl.server.AuthenticationFailedRejection]] that contains this challenge to be added to the response.
*/
- def authenticateOrRejectWithChallenge[T](authenticator: JFunction[Optional[HttpCredentials], CompletionStage[Either[HttpChallenge,T]]],
- inner: JFunction[T, Route]): Route = RouteAdapter {
- D.extractExecutionContext { implicit ctx =>
- val scalaAuthenticator = { cred: Option[scaladsl.model.headers.HttpCredentials] =>
+ def authenticateOrRejectWithChallenge[T](
+ authenticator: JFunction[Optional[HttpCredentials], CompletionStage[Either[HttpChallenge, T]]],
+ inner: JFunction[T, Route]): Route = RouteAdapter {
+ D.extractExecutionContext { implicit ctx ⇒
+ val scalaAuthenticator = { cred: Option[scaladsl.model.headers.HttpCredentials] ⇒
authenticator.apply(cred.map(_.asJava).asJava).toScala.map(_.left.map(_.asScala))
}
-
- D.authenticateOrRejectWithChallenge(scalaAuthenticator) { t =>
+
+ D.authenticateOrRejectWithChallenge(scalaAuthenticator) { t ⇒
inner.apply(t).delegate
}
}
}
-
+
/**
* Lifts an authenticator function into a directive. Same as `authenticateOrRejectWithChallenge`
* but only applies the authenticator function with a certain type of credentials.
*/
- def authenticateOrRejectWithChallenge[C <: HttpCredentials, T](c: Class[C],
- authenticator: JFunction[Optional[C], CompletionStage[Either[HttpChallenge,T]]],
- inner: JFunction[T, Route]): Route = RouteAdapter {
- D.extractExecutionContext { implicit ctx =>
- val scalaAuthenticator = { cred: Option[scaladsl.model.headers.HttpCredentials] =>
+ def authenticateOrRejectWithChallenge[C <: HttpCredentials, T](
+ c: Class[C],
+ authenticator: JFunction[Optional[C], CompletionStage[Either[HttpChallenge, T]]],
+ inner: JFunction[T, Route]): Route = RouteAdapter {
+ D.extractExecutionContext { implicit ctx ⇒
+ val scalaAuthenticator = { cred: Option[scaladsl.model.headers.HttpCredentials] ⇒
authenticator.apply(cred.filter(c.isInstance).map(_.asJava).asJava.asInstanceOf[Optional[C]]).toScala.map(_.left.map(_.asScala)) // TODO make sure cast is safe
}
-
- D.authenticateOrRejectWithChallenge(scalaAuthenticator) { t =>
+
+ D.authenticateOrRejectWithChallenge(scalaAuthenticator) { t ⇒
inner.apply(t).delegate
}
}
@@ -231,7 +233,7 @@ abstract class SecurityDirectives extends SchemeDirectives {
*/
@CorrespondsTo("authorize")
def authorizeWithRequestContext(check: akka.japi.function.Function[RequestContext, Boolean], inner: Supplier[Route]): Route = RouteAdapter {
- D.authorize(rc => check(RequestContext.wrap(rc)))(inner.get().delegate)
+ D.authorize(rc ⇒ check(RequestContext.wrap(rc)))(inner.get().delegate)
}
/**
@@ -251,12 +253,12 @@ abstract class SecurityDirectives extends SchemeDirectives {
*/
@CorrespondsTo("authorizeAsync")
def authorizeAsyncWithRequestContext(check: akka.japi.function.Function[RequestContext, CompletionStage[Boolean]], inner: Supplier[Route]): Route = RouteAdapter {
- D.authorizeAsync(rc => check(RequestContext.wrap(rc)).toScala)(inner.get().delegate)
+ D.authorizeAsync(rc ⇒ check(RequestContext.wrap(rc)).toScala)(inner.get().delegate)
}
/**
* Creates a `Basic` [[HttpChallenge]] for the given realm.
*/
def challengeFor(realm: String): HttpChallenge = HttpChallenge.create("Basic", realm)
-
+
}
\ No newline at end of file
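
The Java DSL variants above are thin adapters over the Scala DSL directives they name, converting Optional/Option and CompletionStage/Future at the boundary. A minimal sketch of the Scala-side usage these adapters delegate to, assuming akka-http's standard Credentials model (realm, path and password check are illustrative):

    import akka.http.scaladsl.server.Directives._
    import akka.http.scaladsl.server.directives.Credentials

    // illustrative authenticator: accepts any user whose password is "secret"
    def userAuthenticator(credentials: Credentials): Option[String] =
      credentials match {
        case p @ Credentials.Provided(id) if p.verify("secret") ⇒ Some(id)
        case _                                                  ⇒ None
      }

    val route =
      path("status") {
        // `.optional` is what the *Optional variants build on: it turns
        // required authentication into a Directive1[Option[T]]
        authenticateBasic(realm = "example", userAuthenticator).optional { maybeUser ⇒
          complete(s"Hello, ${maybeUser.getOrElse("anonymous")}")
        }
      }
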
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala b/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala
index dd905691c9..d91ecdc2c6 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala
@@ -7,7 +7,7 @@ package akka.http.scaladsl.coding
import akka.NotUsed
import akka.http.scaladsl.model._
import akka.stream.{ FlowShape, Materializer }
-import akka.stream.stage.{ GraphStage}
+import akka.stream.stage.{ GraphStage }
import akka.util.ByteString
import headers.HttpEncoding
import akka.stream.scaladsl.{ Sink, Source, Flow }
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala b/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala
index 8613630845..59c4dd90c6 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala
@@ -125,5 +125,5 @@ private[http] object GzipDecompressor {
0, // MTIME 4
0, // XFL
0 // OS
- )
+ )
}
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala b/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala
index b48518182e..6e74ad9ae4 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala
@@ -7,7 +7,7 @@ package akka.http.scaladsl.coding
import akka.http.scaladsl.model._
import akka.http.impl.util.StreamUtils
import akka.stream.FlowShape
-import akka.stream.stage.{ GraphStage}
+import akka.stream.stage.{ GraphStage }
import akka.util.ByteString
import headers.HttpEncodings
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala b/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala
index 147b9a216f..124746ccd5 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala
@@ -91,38 +91,38 @@ object StrictForm {
}
}
- implicit def unmarshaller(implicit formDataUM: FromEntityUnmarshaller[FormData],
+ implicit def unmarshaller(implicit
+ formDataUM: FromEntityUnmarshaller[FormData],
multipartUM: FromEntityUnmarshaller[Multipart.FormData]): FromEntityUnmarshaller[StrictForm] =
- Unmarshaller.withMaterializer { implicit ec ⇒
- implicit fm ⇒
- entity ⇒
+ Unmarshaller.withMaterializer { implicit ec ⇒ implicit fm ⇒
+ entity ⇒
- def tryUnmarshalToQueryForm: Future[StrictForm] =
- for (formData ← formDataUM(entity).fast) yield {
- new StrictForm {
- val fields = formData.fields.map { case (name, value) ⇒ name -> Field.FromString(value) }(collection.breakOut)
- }
+ def tryUnmarshalToQueryForm: Future[StrictForm] =
+ for (formData ← formDataUM(entity).fast) yield {
+ new StrictForm {
+ val fields = formData.fields.map { case (name, value) ⇒ name → Field.FromString(value) }(collection.breakOut)
}
-
- def tryUnmarshalToMultipartForm: Future[StrictForm] =
- for {
- multiPartFD ← multipartUM(entity).fast
- strictMultiPartFD ← multiPartFD.toStrict(10.seconds).fast // TODO: make timeout configurable
- } yield {
- new StrictForm {
- val fields = strictMultiPartFD.strictParts.map {
- case x: Multipart.FormData.BodyPart.Strict ⇒ x.name -> Field.FromPart(x)
- }(collection.breakOut)
- }
- }
-
- tryUnmarshalToQueryForm.fast.recoverWith {
- case Unmarshaller.UnsupportedContentTypeException(supported1) ⇒
- tryUnmarshalToMultipartForm.fast.recoverWith {
- case Unmarshaller.UnsupportedContentTypeException(supported2) ⇒
- FastFuture.failed(Unmarshaller.UnsupportedContentTypeException(supported1 ++ supported2))
- }
}
+
+ def tryUnmarshalToMultipartForm: Future[StrictForm] =
+ for {
+ multiPartFD ← multipartUM(entity).fast
+ strictMultiPartFD ← multiPartFD.toStrict(10.seconds).fast // TODO: make timeout configurable
+ } yield {
+ new StrictForm {
+ val fields = strictMultiPartFD.strictParts.map {
+ case x: Multipart.FormData.BodyPart.Strict ⇒ x.name → Field.FromPart(x)
+ }(collection.breakOut)
+ }
+ }
+
+ tryUnmarshalToQueryForm.fast.recoverWith {
+ case Unmarshaller.UnsupportedContentTypeException(supported1) ⇒
+ tryUnmarshalToMultipartForm.fast.recoverWith {
+ case Unmarshaller.UnsupportedContentTypeException(supported2) ⇒
+ FastFuture.failed(Unmarshaller.UnsupportedContentTypeException(supported1 ++ supported2))
+ }
+ }
}
/**
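
The rewritten StrictForm unmarshaller above first tries the urlencoded FormData unmarshaller and, on UnsupportedContentTypeException, falls back to the multipart one, merging the supported content types if both fail. A minimal usage sketch relying on that implicit being in scope (the route body is illustrative):

    import akka.http.scaladsl.common.StrictForm
    import akka.http.scaladsl.server.Directives._

    // accepts application/x-www-form-urlencoded as well as multipart/form-data,
    // thanks to the recoverWith fallback chain above
    val route =
      entity(as[StrictForm]) { form ⇒
        complete(s"received fields: ${form.fields.map(_._1).mkString(", ")}")
      }
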
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala
index 4c78a9a40a..b7b74df025 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala
@@ -21,7 +21,7 @@ object ContentTypeOverrider {
implicit def forHeadersAndEntity[T <: HttpEntity]: ContentTypeOverrider[(immutable.Seq[HttpHeader], T)] =
new ContentTypeOverrider[(immutable.Seq[HttpHeader], T)] {
def apply(value: (immutable.Seq[HttpHeader], T), newContentType: ContentType) =
- value._1 -> value._2.withContentType(newContentType).asInstanceOf[T]
+ value._1 → value._2.withContentType(newContentType).asInstanceOf[T]
}
implicit val forResponse: ContentTypeOverrider[HttpResponse] =
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala
index d10d993fb7..28dd4fc8b8 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala
@@ -14,7 +14,7 @@ object EmptyValue {
new EmptyValue[UniversalEntity](HttpEntity.Empty)
implicit val emptyHeadersAndEntity: EmptyValue[(immutable.Seq[HttpHeader], UniversalEntity)] =
- new EmptyValue[(immutable.Seq[HttpHeader], UniversalEntity)](Nil -> HttpEntity.Empty)
+ new EmptyValue[(immutable.Seq[HttpHeader], UniversalEntity)](Nil → HttpEntity.Empty)
implicit val emptyResponse: EmptyValue[HttpResponse] =
new EmptyValue[HttpResponse](HttpResponse(entity = emptyEntity.emptyValue))
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala
index 1974bc8000..81c0290448 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala
@@ -36,36 +36,35 @@ sealed abstract class Marshaller[-A, +B] {
* If the wrapping is illegal the [[scala.concurrent.Future]] produced by the resulting marshaller will contain a [[RuntimeException]].
*/
def wrapWithEC[C, D >: B](newMediaType: MediaType)(f: ExecutionContext ⇒ C ⇒ A)(implicit cto: ContentTypeOverrider[D]): Marshaller[C, D] =
- Marshaller { implicit ec ⇒
- value ⇒
- import Marshalling._
- this(f(ec)(value)).fast map {
- _ map {
- (_, newMediaType) match {
- case (WithFixedContentType(_, marshal), newMT: MediaType.Binary) ⇒
- WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
- case (WithFixedContentType(oldCT: ContentType.Binary, marshal), newMT: MediaType.WithFixedCharset) ⇒
- WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
- case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithFixedCharset) if oldCT.charset == newMT.charset ⇒
- WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
- case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithOpenCharset) ⇒
- val newCT = newMT withCharset oldCT.charset
- WithFixedContentType(newCT, () ⇒ cto(marshal(), newCT))
+ Marshaller { implicit ec ⇒ value ⇒
+ import Marshalling._
+ this(f(ec)(value)).fast map {
+ _ map {
+ (_, newMediaType) match {
+ case (WithFixedContentType(_, marshal), newMT: MediaType.Binary) ⇒
+ WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
+ case (WithFixedContentType(oldCT: ContentType.Binary, marshal), newMT: MediaType.WithFixedCharset) ⇒
+ WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
+ case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithFixedCharset) if oldCT.charset == newMT.charset ⇒
+ WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
+ case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithOpenCharset) ⇒
+ val newCT = newMT withCharset oldCT.charset
+ WithFixedContentType(newCT, () ⇒ cto(marshal(), newCT))
- case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithOpenCharset) ⇒
- WithOpenCharset(newMT, cs ⇒ cto(marshal(cs), newMT withCharset cs))
- case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithFixedCharset) ⇒
- WithFixedContentType(newMT, () ⇒ cto(marshal(newMT.charset), newMT))
+ case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithOpenCharset) ⇒
+ WithOpenCharset(newMT, cs ⇒ cto(marshal(cs), newMT withCharset cs))
+ case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithFixedCharset) ⇒
+ WithFixedContentType(newMT, () ⇒ cto(marshal(newMT.charset), newMT))
- case (Opaque(marshal), newMT: MediaType.Binary) ⇒
- WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
- case (Opaque(marshal), newMT: MediaType.WithFixedCharset) ⇒
- WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
+ case (Opaque(marshal), newMT: MediaType.Binary) ⇒
+ WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
+ case (Opaque(marshal), newMT: MediaType.WithFixedCharset) ⇒
+ WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT))
- case x ⇒ sys.error(s"Illegal marshaller wrapping. Marshalling `$x` cannot be wrapped with MediaType `$newMediaType`")
- }
+ case x ⇒ sys.error(s"Illegal marshaller wrapping. Marshalling `$x` cannot be wrapped with MediaType `$newMediaType`")
}
}
+ }
}
def compose[C](f: C ⇒ A): Marshaller[C, B] =
@@ -152,16 +151,18 @@ object Marshalling {
/**
* A Marshalling to a specific [[akka.http.scaladsl.model.ContentType]].
*/
- final case class WithFixedContentType[A](contentType: ContentType,
- marshal: () ⇒ A) extends Marshalling[A] {
+ final case class WithFixedContentType[A](
+ contentType: ContentType,
+ marshal: () ⇒ A) extends Marshalling[A] {
def map[B](f: A ⇒ B): WithFixedContentType[B] = copy(marshal = () ⇒ f(marshal()))
}
/**
* A Marshalling to a specific [[akka.http.scaladsl.model.MediaType]] with a flexible charset.
*/
- final case class WithOpenCharset[A](mediaType: MediaType.WithOpenCharset,
- marshal: HttpCharset ⇒ A) extends Marshalling[A] {
+ final case class WithOpenCharset[A](
+ mediaType: MediaType.WithOpenCharset,
+ marshal: HttpCharset ⇒ A) extends Marshalling[A] {
def map[B](f: A ⇒ B): WithOpenCharset[B] = copy(marshal = cs ⇒ f(marshal(cs)))
}
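
wrapWithEC above re-targets an existing marshaller to a new media type by pattern-matching on the three Marshalling shapes: WithFixedContentType, WithOpenCharset and Opaque. A sketch of how the first of those shapes is typically produced through the public constructor (the Person type is illustrative):

    import akka.http.scaladsl.marshalling.{ Marshaller, ToEntityMarshaller }
    import akka.http.scaladsl.model.{ ContentTypes, HttpEntity }

    final case class Person(name: String)

    // yields a Marshalling.WithFixedContentType for text/plain under the hood
    implicit val personMarshaller: ToEntityMarshaller[Person] =
      Marshaller.withFixedContentType(ContentTypes.`text/plain(UTF-8)`) { person ⇒
        HttpEntity(ContentTypes.`text/plain(UTF-8)`, s"Person(${person.name})")
      }
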
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala
index fa612d828f..62777103a2 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala
@@ -15,9 +15,11 @@ trait PredefinedToResponseMarshallers extends LowPriorityToResponseMarshallerImp
private type TRM[T] = ToResponseMarshaller[T] // brevity alias
- def fromToEntityMarshaller[T](status: StatusCode = StatusCodes.OK,
- headers: immutable.Seq[HttpHeader] = Nil)(
- implicit m: ToEntityMarshaller[T]): ToResponseMarshaller[T] =
+ def fromToEntityMarshaller[T](
+ status: StatusCode = StatusCodes.OK,
+ headers: immutable.Seq[HttpHeader] = Nil)(
+ implicit
+ m: ToEntityMarshaller[T]): ToResponseMarshaller[T] =
fromStatusCodeAndHeadersAndValue compose (t ⇒ (status, headers, t))
implicit val fromResponse: TRM[HttpResponse] = Marshaller.opaque(ConstantFun.scalaIdentityFunction)
@@ -30,7 +32,8 @@ trait PredefinedToResponseMarshallers extends LowPriorityToResponseMarshallerImp
implicit def fromStatusCodeAndValue[S, T](implicit sConv: S ⇒ StatusCode, mt: ToEntityMarshaller[T]): TRM[(S, T)] =
fromStatusCodeAndHeadersAndValue[T] compose { case (status, value) ⇒ (sConv(status), Nil, value) }
- implicit def fromStatusCodeConvertibleAndHeadersAndT[S, T](implicit sConv: S ⇒ StatusCode,
+ implicit def fromStatusCodeConvertibleAndHeadersAndT[S, T](implicit
+ sConv: S ⇒ StatusCode,
mt: ToEntityMarshaller[T]): TRM[(S, immutable.Seq[HttpHeader], T)] =
fromStatusCodeAndHeadersAndValue[T] compose { case (status, headers, value) ⇒ (sConv(status), headers, value) }
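
fromToEntityMarshaller lifts an entity marshaller into a response marshaller with a chosen status code and headers. A sketch using the predefined String entity marshaller (the status choice is illustrative):

    import akka.http.scaladsl.marshalling.{ Marshaller, ToResponseMarshaller }
    import akka.http.scaladsl.model.StatusCodes

    // every String completed through this marshaller becomes a 202 Accepted response
    implicit val acceptedString: ToResponseMarshaller[String] =
      Marshaller.fromToEntityMarshaller[String](status = StatusCodes.Accepted)
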
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala
index 1986b0d3d3..a9d3125391 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala
@@ -44,12 +44,11 @@ abstract class Directive[L](implicit val ev: Tuple[L]) {
def as[A](constructor: ConstructFromTuple[L, A]): Directive1[A] = {
def validatedMap[R](f: L ⇒ R)(implicit tupler: Tupler[R]): Directive[tupler.Out] =
Directive[tupler.Out] { inner ⇒
- tapply { values ⇒
- ctx ⇒
- try inner(tupler(f(values)))(ctx)
- catch {
- case e: IllegalArgumentException ⇒ ctx.reject(ValidationRejection(e.getMessage.nullAsEmpty, Some(e)))
- }
+ tapply { values ⇒ ctx ⇒
+ try inner(tupler(f(values)))(ctx)
+ catch {
+ case e: IllegalArgumentException ⇒ ctx.reject(ValidationRejection(e.getMessage.nullAsEmpty, Some(e)))
+ }
}
}(tupler.OutIsTuple)
@@ -88,14 +87,13 @@ abstract class Directive[L](implicit val ev: Tuple[L]) {
* **before the inner route was applied**.
*/
def recover[R >: L: Tuple](recovery: immutable.Seq[Rejection] ⇒ Directive[R]): Directive[R] =
- Directive[R] { inner ⇒
- ctx ⇒
- import ctx.executionContext
- @volatile var rejectedFromInnerRoute = false
- tapply({ list ⇒ c ⇒ rejectedFromInnerRoute = true; inner(list)(c) })(ctx).fast.flatMap {
- case RouteResult.Rejected(rejections) if !rejectedFromInnerRoute ⇒ recovery(rejections).tapply(inner)(ctx)
- case x ⇒ FastFuture.successful(x)
- }
+ Directive[R] { inner ⇒ ctx ⇒
+ import ctx.executionContext
+ @volatile var rejectedFromInnerRoute = false
+ tapply({ list ⇒ c ⇒ rejectedFromInnerRoute = true; inner(list)(c) })(ctx).fast.flatMap {
+ case RouteResult.Rejected(rejections) if !rejectedFromInnerRoute ⇒ recovery(rejections).tapply(inner)(ctx)
+ case x ⇒ FastFuture.successful(x)
+ }
}
/**
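
As the scaladoc above notes, recover only sees rejections produced before the inner route is applied; the @volatile flag guards exactly that. A small usage sketch under those semantics (parameter name and fallback value are illustrative):

    import akka.http.scaladsl.server.Directive1
    import akka.http.scaladsl.server.Directives._

    // if ?name=... is missing, fall back to a constant instead of rejecting
    val nameOrAnonymous: Directive1[String] =
      parameter("name").recover(_ ⇒ provide("anonymous"))

    val route = nameOrAnonymous { name ⇒ complete(s"Hi, $name") }
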
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/ExceptionHandler.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/ExceptionHandler.scala
index 907bcbd282..10fc8e10ed 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/ExceptionHandler.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/ExceptionHandler.scala
@@ -40,7 +40,8 @@ object ExceptionHandler {
def default(settings: RoutingSettings): ExceptionHandler =
apply(knownToBeSealed = true) {
case IllegalRequestException(info, status) ⇒ ctx ⇒ {
- ctx.log.warning("Illegal request {}\n\t{}\n\tCompleting with '{}' response",
+ ctx.log.warning(
+ "Illegal request {}\n\t{}\n\tCompleting with '{}' response",
ctx.request, info.formatPretty, status)
ctx.complete((status, info.format(settings.verboseErrorMessages)))
}
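
A user-supplied handler has the same shape as the default one above: a partial function from Throwable to Route, wrapped by ExceptionHandler.apply. A minimal sketch (exception type and message are illustrative):

    import akka.http.scaladsl.model.StatusCodes.InternalServerError
    import akka.http.scaladsl.server.ExceptionHandler
    import akka.http.scaladsl.server.Directives._

    implicit val myExceptionHandler: ExceptionHandler = ExceptionHandler {
      case _: ArithmeticException ⇒
        extractUri { uri ⇒
          complete(InternalServerError → s"Request to $uri could not be handled normally")
        }
    }
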
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala
index f816a6d22f..acdb5715af 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala
@@ -78,13 +78,13 @@ object RejectionHandler {
}
/**
- * Convenience method for handling rejections created by created by the onCompleteWithBreaker directive.
- * Signals that the request was rejected because the supplied circuit breaker is open and requests are failing fast.
- *
- * Use to customise the error response being written instead of the default [[ServiceUnavailable]] response.
- */
- def handleCircuitBreakerOpenRejection(handler: CircuitBreakerOpenRejection => Route): this.type =
- handle { case r: CircuitBreakerOpenRejection => handler(r) }
+   * Convenience method for handling rejections created by the onCompleteWithBreaker directive.
+   * Signals that the request was rejected because the supplied circuit breaker is open and requests are failing fast.
+   *
+   * Use this to customise the error response being written instead of the default [[ServiceUnavailable]] response.
+ */
+ def handleCircuitBreakerOpenRejection(handler: CircuitBreakerOpenRejection ⇒ Route): this.type =
+ handle { case r: CircuitBreakerOpenRejection ⇒ handler(r) }
def result(): RejectionHandler =
new BuiltRejectionHandler(cases.result(), notFound, isDefault)
@@ -98,9 +98,10 @@ object RejectionHandler {
def apply(rejection: Rejection) = rejection.asInstanceOf[T]
}
- private class BuiltRejectionHandler(val cases: Vector[Handler],
- val notFound: Option[Route],
- val isDefault: Boolean) extends RejectionHandler {
+ private class BuiltRejectionHandler(
+ val cases: Vector[Handler],
+ val notFound: Option[Route],
+ val isDefault: Boolean) extends RejectionHandler {
def apply(rejections: immutable.Seq[Rejection]): Option[Route] =
if (rejections.nonEmpty) {
@tailrec def rec(ix: Int): Option[Route] =
@@ -130,7 +131,7 @@ object RejectionHandler {
complete((BadRequest, "Uri scheme not allowed, supported schemes: " + schemes))
}
.handleAll[MethodRejection] { rejections ⇒
- val (methods, names) = rejections.map(r ⇒ r.supported -> r.supported.name).unzip
+ val (methods, names) = rejections.map(r ⇒ r.supported → r.supported.name).unzip
complete((MethodNotAllowed, List(Allow(methods)), "HTTP method not allowed, supported methods: " + names.mkString(", ")))
}
.handle {
@@ -225,7 +226,8 @@ object RejectionHandler {
.handle { case ExpectedWebSocketRequestRejection ⇒ complete((BadRequest, "Expected WebSocket Upgrade request")) }
.handleAll[UnsupportedWebSocketSubprotocolRejection] { rejections ⇒
val supported = rejections.map(_.supportedProtocol)
- complete(HttpResponse(BadRequest,
+ complete(HttpResponse(
+ BadRequest,
entity = s"None of the websocket subprotocols offered in the request are supported. Supported are ${supported.map("'" + _ + "'").mkString(",")}.",
headers = `Sec-WebSocket-Protocol`(supported) :: Nil))
}
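
The builder method documented above can be exercised like any other handle case. A sketch that swaps the default ServiceUnavailable body for a custom one (the message is illustrative):

    import akka.http.scaladsl.model.StatusCodes.ServiceUnavailable
    import akka.http.scaladsl.server.RejectionHandler
    import akka.http.scaladsl.server.Directives._

    implicit val myRejectionHandler: RejectionHandler =
      RejectionHandler.newBuilder()
        .handleCircuitBreakerOpenRejection(_ ⇒
          complete(ServiceUnavailable → "temporarily overloaded, please retry later"))
        .result()
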
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala
index 537eac176b..37e3855cbf 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala
@@ -53,9 +53,9 @@ trait RequestContext {
*/
def reconfigure(
executionContext: ExecutionContextExecutor = executionContext,
- materializer: Materializer = materializer,
- log: LoggingAdapter = log,
- settings: RoutingSettings = settings): RequestContext
+ materializer: Materializer = materializer,
+ log: LoggingAdapter = log,
+ settings: RoutingSettings = settings): RequestContext
/**
* Completes the request with the given ToResponseMarshallable.
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala
index af291bc7cd..2e17c3cbd0 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala
@@ -17,13 +17,13 @@ import akka.http.scaladsl.util.FastFuture._
* INTERNAL API
*/
private[http] class RequestContextImpl(
- val request: HttpRequest,
- val unmatchedPath: Uri.Path,
+ val request: HttpRequest,
+ val unmatchedPath: Uri.Path,
val executionContext: ExecutionContextExecutor,
- val materializer: Materializer,
- val log: LoggingAdapter,
- val settings: RoutingSettings,
- val parserSettings: ParserSettings) extends RequestContext {
+ val materializer: Materializer,
+ val log: LoggingAdapter,
+ val settings: RoutingSettings,
+ val parserSettings: ParserSettings) extends RequestContext {
def this(request: HttpRequest, log: LoggingAdapter, settings: RoutingSettings, parserSettings: ParserSettings)(implicit ec: ExecutionContextExecutor, materializer: Materializer) =
this(request, request.uri.path, ec, materializer, log, settings, parserSettings)
@@ -97,12 +97,13 @@ private[http] class RequestContextImpl(
case _ ⇒ Future.successful(RouteResult.Rejected(UnacceptedResponseContentTypeRejection(supported) :: Nil))
}
- private def copy(request: HttpRequest = request,
- unmatchedPath: Uri.Path = unmatchedPath,
- executionContext: ExecutionContextExecutor = executionContext,
- materializer: Materializer = materializer,
- log: LoggingAdapter = log,
- routingSettings: RoutingSettings = settings,
- parserSettings: ParserSettings = parserSettings) =
+ private def copy(
+ request: HttpRequest = request,
+ unmatchedPath: Uri.Path = unmatchedPath,
+ executionContext: ExecutionContextExecutor = executionContext,
+ materializer: Materializer = materializer,
+ log: LoggingAdapter = log,
+ routingSettings: RoutingSettings = settings,
+ parserSettings: ParserSettings = parserSettings) =
new RequestContextImpl(request, unmatchedPath, executionContext, materializer, log, routingSettings, parserSettings)
}
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala
index de573c3e23..41f6c8bf49 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala
@@ -23,8 +23,9 @@ object Route {
/**
* "Seals" a route by wrapping it with exception handling and rejection conversion.
*/
- def seal(route: Route)(implicit routingSettings: RoutingSettings,
- parserSettings: ParserSettings = null,
+ def seal(route: Route)(implicit
+ routingSettings: RoutingSettings,
+ parserSettings: ParserSettings = null,
rejectionHandler: RejectionHandler = RejectionHandler.default,
exceptionHandler: ExceptionHandler = null): Route = {
import directives.ExecutionDirectives._
@@ -40,25 +41,27 @@ object Route {
*
* This conversion is also implicitly available through [[RouteResult#route2HandlerFlow]].
*/
- def handlerFlow(route: Route)(implicit routingSettings: RoutingSettings,
- parserSettings: ParserSettings,
- materializer: Materializer,
- routingLog: RoutingLog,
+ def handlerFlow(route: Route)(implicit
+ routingSettings: RoutingSettings,
+ parserSettings: ParserSettings,
+ materializer: Materializer,
+ routingLog: RoutingLog,
executionContext: ExecutionContextExecutor = null,
- rejectionHandler: RejectionHandler = RejectionHandler.default,
- exceptionHandler: ExceptionHandler = null): Flow[HttpRequest, HttpResponse, NotUsed] =
+ rejectionHandler: RejectionHandler = RejectionHandler.default,
+ exceptionHandler: ExceptionHandler = null): Flow[HttpRequest, HttpResponse, NotUsed] =
Flow[HttpRequest].mapAsync(1)(asyncHandler(route))
/**
* Turns a `Route` into an async handler function.
*/
- def asyncHandler(route: Route)(implicit routingSettings: RoutingSettings,
- parserSettings: ParserSettings,
- materializer: Materializer,
- routingLog: RoutingLog,
+ def asyncHandler(route: Route)(implicit
+ routingSettings: RoutingSettings,
+ parserSettings: ParserSettings,
+ materializer: Materializer,
+ routingLog: RoutingLog,
executionContext: ExecutionContextExecutor = null,
- rejectionHandler: RejectionHandler = RejectionHandler.default,
- exceptionHandler: ExceptionHandler = null): HttpRequest ⇒ Future[HttpResponse] = {
+ rejectionHandler: RejectionHandler = RejectionHandler.default,
+ exceptionHandler: ExceptionHandler = null): HttpRequest ⇒ Future[HttpResponse] = {
val effectiveEC = if (executionContext ne null) executionContext else materializer.executionContext
{
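
Reformatted or not, seal and asyncHandler still resolve their settings, handlers and materializer implicitly. A sketch of turning a route into a plain request-handling function, assuming the implicit defaults derived from an ActorSystem (names are illustrative):

    import akka.actor.ActorSystem
    import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
    import akka.http.scaladsl.server.Route
    import akka.http.scaladsl.server.Directives._
    import akka.stream.ActorMaterializer
    import scala.concurrent.Future

    implicit val system = ActorSystem("demo")
    implicit val materializer = ActorMaterializer()

    val route: Route = path("ping") { complete("pong") }

    // sealing bakes in rejection/exception handling; asyncHandler makes it callable
    val handler: HttpRequest ⇒ Future[HttpResponse] = Route.asyncHandler(Route.seal(route))
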
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala
index 39e18c4034..0e4b613093 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala
@@ -30,10 +30,11 @@ object RouteResult {
override def getRejections = rejections.map(r ⇒ r: javadsl.server.Rejection).toIterable.asJava
}
- implicit def route2HandlerFlow(route: Route)(implicit routingSettings: RoutingSettings,
- parserSettings: ParserSettings,
- materializer: Materializer,
- routingLog: RoutingLog,
+ implicit def route2HandlerFlow(route: Route)(implicit
+ routingSettings: RoutingSettings,
+ parserSettings: ParserSettings,
+ materializer: Materializer,
+ routingLog: RoutingLog,
executionContext: ExecutionContext = null,
rejectionHandler: RejectionHandler = RejectionHandler.default,
exceptionHandler: ExceptionHandler = null): Flow[HttpRequest, HttpResponse, NotUsed] =
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala
index 6d4ac5cf37..c274a598e1 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala
@@ -62,7 +62,7 @@ case class LoggingMagnet[T](f: LoggingAdapter ⇒ T) // # logging-magnet
object LoggingMagnet {
implicit def forMessageFromMarker[T](marker: String): LoggingMagnet[T ⇒ Unit] = // # message-magnets
- forMessageFromMarkerAndLevel[T](marker -> DebugLevel)
+ forMessageFromMarkerAndLevel[T](marker → DebugLevel)
implicit def forMessageFromMarkerAndLevel[T](markerAndLevel: (String, LogLevel)): LoggingMagnet[T ⇒ Unit] = // # message-magnets
forMessageFromFullShow[T] {
@@ -77,7 +77,7 @@ object LoggingMagnet {
LoggingMagnet(log ⇒ show(_).logTo(log))
implicit def forRequestResponseFromMarker(marker: String): LoggingMagnet[HttpRequest ⇒ RouteResult ⇒ Unit] = // # request-response-magnets
- forRequestResponseFromMarkerAndLevel(marker -> DebugLevel)
+ forRequestResponseFromMarkerAndLevel(marker → DebugLevel)
implicit def forRequestResponseFromMarkerAndLevel(markerAndLevel: (String, LogLevel)): LoggingMagnet[HttpRequest ⇒ RouteResult ⇒ Unit] = // # request-response-magnets
forRequestResponseFromFullShow {
@@ -87,10 +87,9 @@ object LoggingMagnet {
}
implicit def forRequestResponseFromFullShow(show: HttpRequest ⇒ RouteResult ⇒ Option[LogEntry]): LoggingMagnet[HttpRequest ⇒ RouteResult ⇒ Unit] = // # request-response-magnets
- LoggingMagnet { log ⇒
- request ⇒
- val showResult = show(request)
- result ⇒ showResult(result).foreach(_.logTo(log))
+ LoggingMagnet { log ⇒ request ⇒
+ val showResult = show(request)
+ result ⇒ showResult(result).foreach(_.logTo(log))
}
}
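
The magnet conversions above let a bare marker string, a (marker, level) pair, or a full show function select the logging behaviour. A sketch using the pair form (the marker name is illustrative):

    import akka.event.Logging
    import akka.http.scaladsl.server.Directives._

    // the (String, LogLevel) tuple is lifted by forRequestResponseFromMarkerAndLevel
    val route =
      logRequestResult("api" → Logging.InfoLevel) {
        complete("ok")
      }
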
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala
index b803e22947..639d2ca80a 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala
@@ -25,15 +25,14 @@ trait ExecutionDirectives {
* @group execution
*/
def handleExceptions(handler: ExceptionHandler): Directive0 =
- Directive { innerRouteBuilder ⇒
- ctx ⇒
- import ctx.executionContext
- def handleException: PartialFunction[Throwable, Future[RouteResult]] =
- handler andThen (_(ctx.withAcceptAll))
- try innerRouteBuilder(())(ctx).fast.recoverWith(handleException)
- catch {
- case NonFatal(e) ⇒ handleException.applyOrElse[Throwable, Future[RouteResult]](e, throw _)
- }
+ Directive { innerRouteBuilder ⇒ ctx ⇒
+ import ctx.executionContext
+ def handleException: PartialFunction[Throwable, Future[RouteResult]] =
+ handler andThen (_(ctx.withAcceptAll))
+ try innerRouteBuilder(())(ctx).fast.recoverWith(handleException)
+ catch {
+ case NonFatal(e) ⇒ handleException.applyOrElse[Throwable, Future[RouteResult]](e, throw _)
+ }
}
/**
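
handleExceptions recovers failures on both paths shown above: asynchronous ones via recoverWith on the future and synchronous ones via the catch around route construction. A sketch triggering the synchronous path (handler and message are illustrative):

    import akka.http.scaladsl.model.StatusCodes.BadRequest
    import akka.http.scaladsl.server.ExceptionHandler
    import akka.http.scaladsl.server.Directives._

    val divideByZeroHandler = ExceptionHandler {
      case _: ArithmeticException ⇒ complete(BadRequest → "division by zero")
    }

    val zero = 0
    val route =
      handleExceptions(divideByZeroHandler) {
        complete((100 / zero).toString) // throws while building the route
      }
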
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala
index d084736f3f..c8edaa8bc0 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala
@@ -355,39 +355,38 @@ object DirectoryListing {
|""".stripMarginWithNewline("\n") split '$'
def directoryMarshaller(renderVanityFooter: Boolean): ToEntityMarshaller[DirectoryListing] =
- Marshaller.StringMarshaller.wrapWithEC(MediaTypes.`text/html`) { implicit ec ⇒
- listing ⇒
- val DirectoryListing(path, isRoot, files) = listing
- val filesAndNames = files.map(file ⇒ file -> file.getName).sortBy(_._2)
- val deduped = filesAndNames.zipWithIndex.flatMap {
- case (fan @ (file, name), ix) ⇒
- if (ix == 0 || filesAndNames(ix - 1)._2 != name) Some(fan) else None
- }
- val (directoryFilesAndNames, fileFilesAndNames) = deduped.partition(_._1.isDirectory)
- def maxNameLength(seq: Seq[(File, String)]) = if (seq.isEmpty) 0 else seq.map(_._2.length).max
- val maxNameLen = math.max(maxNameLength(directoryFilesAndNames) + 1, maxNameLength(fileFilesAndNames))
- val sb = new java.lang.StringBuilder
- sb.append(html(0)).append(path).append(html(1)).append(path).append(html(2))
- if (!isRoot) {
- val secondToLastSlash = path.lastIndexOf('/', path.lastIndexOf('/', path.length - 1) - 1)
- sb.append("../\n" format path.substring(0, secondToLastSlash))
- }
- def lastModified(file: File) = DateTime(file.lastModified).toIsoLikeDateTimeString
- def start(name: String) =
- sb.append("").append(name).append("")
- .append(" " * (maxNameLen - name.length))
- def renderDirectory(file: File, name: String) =
- start(name + '/').append(" ").append(lastModified(file)).append('\n')
- def renderFile(file: File, name: String) = {
- val size = akka.http.impl.util.humanReadableByteCount(file.length, si = true)
- start(name).append(" ").append(lastModified(file))
- sb.append(" ".substring(size.length)).append(size).append('\n')
- }
- for ((file, name) ← directoryFilesAndNames) renderDirectory(file, name)
- for ((file, name) ← fileFilesAndNames) renderFile(file, name)
- if (isRoot && files.isEmpty) sb.append("(no files)\n")
- sb.append(html(3))
- if (renderVanityFooter) sb.append(html(4)).append(DateTime.now.toIsoLikeDateTimeString).append(html(5))
- sb.append(html(6)).toString
+ Marshaller.StringMarshaller.wrapWithEC(MediaTypes.`text/html`) { implicit ec ⇒ listing ⇒
+ val DirectoryListing(path, isRoot, files) = listing
+ val filesAndNames = files.map(file ⇒ file → file.getName).sortBy(_._2)
+ val deduped = filesAndNames.zipWithIndex.flatMap {
+ case (fan @ (file, name), ix) ⇒
+ if (ix == 0 || filesAndNames(ix - 1)._2 != name) Some(fan) else None
+ }
+ val (directoryFilesAndNames, fileFilesAndNames) = deduped.partition(_._1.isDirectory)
+ def maxNameLength(seq: Seq[(File, String)]) = if (seq.isEmpty) 0 else seq.map(_._2.length).max
+ val maxNameLen = math.max(maxNameLength(directoryFilesAndNames) + 1, maxNameLength(fileFilesAndNames))
+ val sb = new java.lang.StringBuilder
+ sb.append(html(0)).append(path).append(html(1)).append(path).append(html(2))
+ if (!isRoot) {
+ val secondToLastSlash = path.lastIndexOf('/', path.lastIndexOf('/', path.length - 1) - 1)
+ sb.append("../\n" format path.substring(0, secondToLastSlash))
+ }
+ def lastModified(file: File) = DateTime(file.lastModified).toIsoLikeDateTimeString
+ def start(name: String) =
+ sb.append("").append(name).append("")
+ .append(" " * (maxNameLen - name.length))
+ def renderDirectory(file: File, name: String) =
+ start(name + '/').append(" ").append(lastModified(file)).append('\n')
+ def renderFile(file: File, name: String) = {
+ val size = akka.http.impl.util.humanReadableByteCount(file.length, si = true)
+ start(name).append(" ").append(lastModified(file))
+ sb.append(" ".substring(size.length)).append(size).append('\n')
+ }
+ for ((file, name) ← directoryFilesAndNames) renderDirectory(file, name)
+ for ((file, name) ← fileFilesAndNames) renderFile(file, name)
+ if (isRoot && files.isEmpty) sb.append("(no files)\n")
+ sb.append(html(3))
+ if (renderVanityFooter) sb.append(html(4)).append(DateTime.now.toIsoLikeDateTimeString).append(html(5))
+ sb.append(html(6)).toString
}
}
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala
index bff78d9a12..4b170422f3 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala
@@ -89,7 +89,7 @@ object FormFieldDirectives extends FormFieldDirectives {
private val _formFieldMultiMap: Directive1[Map[String, List[String]]] = {
@tailrec def append(
- map: Map[String, List[String]],
+ map: Map[String, List[String]],
fields: immutable.Seq[(String, String)]): Map[String, List[String]] = {
if (fields.isEmpty) {
map
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala
index 172b22e4d9..87ae5a1910 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/HeaderDirectives.scala
@@ -21,13 +21,13 @@ trait HeaderDirectives {
import RouteDirectives._
/**
- * Checks that request comes from the same origin. Extracts the [[Origin]] header value and verifies that
- * allowed range contains the obtained value. In the case of absent of the [[Origin]] header rejects
- * with [[MissingHeaderRejection]]. If the origin value is not in the allowed range
- * rejects with an [[InvalidOriginRejection]] and [[StatusCodes.Forbidden]] status.
- *
- * @group header
- */
+   * Checks that the request comes from the same origin. Extracts the [[Origin]] header value and verifies
+   * that the allowed range contains the obtained value. If the [[Origin]] header is absent, the request is
+   * rejected with a [[MissingHeaderRejection]]. If the origin value is not within the allowed range, the
+   * request is rejected with an [[InvalidOriginRejection]] and a [[StatusCodes.Forbidden]] status.
+ *
+ * @group header
+ */
def checkSameOrigin(allowed: HttpOriginRange): Directive0 = {
headerValueByType[Origin]().flatMap { origin ⇒
if (origin.origins.exists(allowed.matches)) pass
@@ -172,22 +172,18 @@ object HeaderMagnet extends LowPriorityHeaderMagnetImplicits {
* If possible we want to apply the special logic for [[ModeledCustomHeader]] to extract custom headers by type,
* otherwise the default `fromUnit` is good enough (for headers that the parser emits in the right type already).
*/
- implicit def fromUnitForModeledCustomHeader[T <: ModeledCustomHeader[T], H <: ModeledCustomHeaderCompanion[T]]
- (u: Unit)(implicit tag: ClassTag[T], companion: ModeledCustomHeaderCompanion[T]): HeaderMagnet[T] =
- fromClassTagForModeledCustomHeader[T, H](tag, companion)
+ implicit def fromUnitForModeledCustomHeader[T <: ModeledCustomHeader[T], H <: ModeledCustomHeaderCompanion[T]](u: Unit)(implicit tag: ClassTag[T], companion: ModeledCustomHeaderCompanion[T]): HeaderMagnet[T] =
+ fromClassTagForModeledCustomHeader[T, H](tag, companion)
+ implicit def fromClassForModeledCustomHeader[T <: ModeledCustomHeader[T], H <: ModeledCustomHeaderCompanion[T]](clazz: Class[T], companion: ModeledCustomHeaderCompanion[T]): HeaderMagnet[T] =
+ fromClassTagForModeledCustomHeader(ClassTag(clazz), companion)
- implicit def fromClassForModeledCustomHeader[T <: ModeledCustomHeader[T], H <: ModeledCustomHeaderCompanion[T]]
- (clazz: Class[T], companion: ModeledCustomHeaderCompanion[T]): HeaderMagnet[T] =
- fromClassTagForModeledCustomHeader(ClassTag(clazz), companion)
-
- implicit def fromClassTagForModeledCustomHeader[T <: ModeledCustomHeader[T], H <: ModeledCustomHeaderCompanion[T]]
- (tag: ClassTag[T], companion: ModeledCustomHeaderCompanion[T]): HeaderMagnet[T] =
+ implicit def fromClassTagForModeledCustomHeader[T <: ModeledCustomHeader[T], H <: ModeledCustomHeaderCompanion[T]](tag: ClassTag[T], companion: ModeledCustomHeaderCompanion[T]): HeaderMagnet[T] =
new HeaderMagnet[T] {
override def runtimeClass = tag.runtimeClass.asInstanceOf[Class[T]]
override def classTag = tag
override def extractPF = {
- case h if h.is(companion.lowercaseName) => companion.apply(h.value)
+ case h if h.is(companion.lowercaseName) ⇒ companion.apply(h.value)
}
}
@@ -199,11 +195,11 @@ trait LowPriorityHeaderMagnetImplicits {
// TODO DRY?
implicit def fromClassNormalJavaHeader[T <: akka.http.javadsl.model.HttpHeader](clazz: Class[T]): HeaderMagnet[T] =
- new HeaderMagnet[T] {
- override def classTag: ClassTag[T] = ClassTag(clazz)
- override def runtimeClass: Class[T] = clazz
- override def extractPF: PartialFunction[HttpHeader, T] = { case x if runtimeClass.isAssignableFrom(x.getClass) => x.asInstanceOf[T] }
- }
+ new HeaderMagnet[T] {
+ override def classTag: ClassTag[T] = ClassTag(clazz)
+ override def runtimeClass: Class[T] = clazz
+ override def extractPF: PartialFunction[HttpHeader, T] = { case x if runtimeClass.isAssignableFrom(x.getClass) ⇒ x.asInstanceOf[T] }
+ }
implicit def fromUnitNormalHeader[T <: HttpHeader](u: Unit)(implicit tag: ClassTag[T]): HeaderMagnet[T] =
fromClassTagNormalHeader(tag)
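
checkSameOrigin, per the scaladoc above, rejects with MissingHeaderRejection when the Origin header is absent and with InvalidOriginRejection when it lies outside the allowed range. A sketch (the allowed origin is illustrative):

    import akka.http.scaladsl.model.headers.{ HttpOrigin, HttpOriginRange }
    import akka.http.scaladsl.server.Directives._

    // only requests carrying Origin: http://localhost:8080 reach the inner route
    val route =
      checkSameOrigin(HttpOriginRange(HttpOrigin("http://localhost:8080"))) {
        complete("same origin")
      }
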
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/SecurityDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/SecurityDirectives.scala
index 16cab40e0e..7b845e3e15 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/SecurityDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/SecurityDirectives.scala
@@ -13,7 +13,7 @@ import akka.http.scaladsl.util.FastFuture._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.server.AuthenticationFailedRejection.{ CredentialsRejected, CredentialsMissing }
-import scala.util.{Try, Success}
+import scala.util.{ Try, Success }
/**
* Provides directives for securing an inner route using the standard Http authentication headers [[`WWW-Authenticate`]]
@@ -220,7 +220,7 @@ trait SecurityDirectives {
* @group security
*/
def authorize(check: RequestContext ⇒ Boolean): Directive0 =
- authorizeAsync(ctx => Future.successful(check(ctx)))
+ authorizeAsync(ctx ⇒ Future.successful(check(ctx)))
/**
* Asynchronous version of [[authorize]].
@@ -230,7 +230,7 @@ trait SecurityDirectives {
* @group security
*/
def authorizeAsync(check: ⇒ Future[Boolean]): Directive0 =
- authorizeAsync(ctx => check)
+ authorizeAsync(ctx ⇒ check)
/**
* Asynchronous version of [[authorize]].
@@ -241,10 +241,10 @@ trait SecurityDirectives {
*/
def authorizeAsync(check: RequestContext ⇒ Future[Boolean]): Directive0 =
extractExecutionContext.flatMap { implicit ec ⇒
- extract(check).flatMap[Unit] { fa =>
+ extract(check).flatMap[Unit] { fa ⇒
onComplete(fa).flatMap {
- case Success(true) => pass
- case _ => reject(AuthorizationFailedRejection)
+ case Success(true) ⇒ pass
+ case _ ⇒ reject(AuthorizationFailedRejection)
}
}
}
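
The RequestContext-taking overload of authorize above admits checks against the whole request, not just previously extracted values. A sketch using a header-based predicate (header name and value are illustrative):

    import akka.http.scaladsl.server.Directives._

    val route =
      path("admin") {
        authorize(ctx ⇒ ctx.request.headers.exists(h ⇒ h.is("x-role") && h.value == "admin")) {
          complete("welcome, admin")
        }
      }
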
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/TimeoutDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/TimeoutDirectives.scala
index 5964538e5a..835adafa9c 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/TimeoutDirectives.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/TimeoutDirectives.scala
@@ -57,17 +57,16 @@ trait TimeoutDirectives {
* @group timeout
*/
def withRequestTimeout(timeout: Duration, handler: Option[HttpRequest ⇒ HttpResponse]): Directive0 =
- Directive { inner ⇒
- ctx ⇒
- ctx.request.header[`Timeout-Access`] match {
- case Some(t) ⇒
- handler match {
- case Some(h) ⇒ t.timeoutAccess.update(timeout, h)
- case _ ⇒ t.timeoutAccess.updateTimeout(timeout)
- }
- case _ ⇒ ctx.log.warning("withRequestTimeout was used in route however no request-timeout is set!")
- }
- inner()(ctx)
+ Directive { inner ⇒ ctx ⇒
+ ctx.request.header[`Timeout-Access`] match {
+ case Some(t) ⇒
+ handler match {
+ case Some(h) ⇒ t.timeoutAccess.update(timeout, h)
+ case _ ⇒ t.timeoutAccess.updateTimeout(timeout)
+ }
+ case _ ⇒ ctx.log.warning("withRequestTimeout was used in route however no request-timeout is set!")
+ }
+ inner()(ctx)
}
/**
@@ -80,13 +79,12 @@ trait TimeoutDirectives {
* @group timeout
*/
def withRequestTimeoutResponse(handler: HttpRequest ⇒ HttpResponse): Directive0 =
- Directive { inner ⇒
- ctx ⇒
- ctx.request.header[`Timeout-Access`] match {
- case Some(t) ⇒ t.timeoutAccess.updateHandler(handler)
- case _ ⇒ ctx.log.warning("withRequestTimeoutResponse was used in route however no request-timeout is set!")
- }
- inner()(ctx)
+ Directive { inner ⇒ ctx ⇒
+ ctx.request.header[`Timeout-Access`] match {
+ case Some(t) ⇒ t.timeoutAccess.updateHandler(handler)
+ case _ ⇒ ctx.log.warning("withRequestTimeoutResponse was used in route however no request-timeout is set!")
+ }
+ inner()(ctx)
}
}
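
withRequestTimeout updates the timeout, and optionally the timeout response, through the request's Timeout-Access header as shown above. A sketch combining both, using the Option-taking overload from this file (duration and message are illustrative):

    import scala.concurrent.duration._
    import akka.http.scaladsl.model.{ HttpRequest, HttpResponse, StatusCodes }
    import akka.http.scaladsl.server.Directives._

    val timeoutResponse = (_: HttpRequest) ⇒
      HttpResponse(StatusCodes.ServiceUnavailable, entity = "request timed out")

    val route =
      withRequestTimeout(1.second, Some(timeoutResponse)) {
        complete("fast enough")
      }
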
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala
index 2933073b39..38aa5efe3b 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala
@@ -61,13 +61,13 @@ trait MultipartUnmarshallers {
createStrict = (_, parts) ⇒ Multipart.ByteRanges.Strict(parts))
def multipartUnmarshaller[T <: Multipart, BP <: Multipart.BodyPart, BPS <: Multipart.BodyPart.Strict](
- mediaRange: MediaRange,
- defaultContentType: ContentType,
- createBodyPart: (BodyPartEntity, List[HttpHeader]) ⇒ BP,
- createStreamed: (MediaType.Multipart, Source[BP, Any]) ⇒ T,
+ mediaRange: MediaRange,
+ defaultContentType: ContentType,
+ createBodyPart: (BodyPartEntity, List[HttpHeader]) ⇒ BP,
+ createStreamed: (MediaType.Multipart, Source[BP, Any]) ⇒ T,
createStrictBodyPart: (HttpEntity.Strict, List[HttpHeader]) ⇒ BPS,
- createStrict: (MediaType.Multipart, immutable.Seq[BPS]) ⇒ T)(implicit log: LoggingAdapter = NoLogging, parserSettings: ParserSettings = null): FromEntityUnmarshaller[T] =
- Unmarshaller.withMaterializer { implicit ec ⇒ mat =>
+ createStrict: (MediaType.Multipart, immutable.Seq[BPS]) ⇒ T)(implicit log: LoggingAdapter = NoLogging, parserSettings: ParserSettings = null): FromEntityUnmarshaller[T] =
+ Unmarshaller.withMaterializer { implicit ec ⇒ mat ⇒
entity ⇒
if (entity.contentType.mediaType.isMultipart && mediaRange.matches(entity.contentType.mediaType)) {
entity.contentType.mediaType.params.get("boundary") match {
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala
index 6fedce64a3..6b6ac6b96a 100755
--- a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala
@@ -40,9 +40,8 @@ trait PredefinedFromStringUnmarshallers {
implicit def CsvSeq[T](implicit unmarshaller: Unmarshaller[String, T]): Unmarshaller[String, immutable.Seq[T]] =
Unmarshaller.strict[String, immutable.Seq[String]] { string ⇒
string.split(",").toList
- } flatMap { implicit ec ⇒
- implicit mat ⇒ strings ⇒
- FastFuture.sequence(strings.map(unmarshaller(_)))
+ } flatMap { implicit ec ⇒ implicit mat ⇒ strings ⇒
+ FastFuture.sequence(strings.map(unmarshaller(_)))
}
val HexByte: Unmarshaller[String, Byte] =
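
CsvSeq above splits the raw string on commas, then runs each piece through the element unmarshaller inside the curried ec ⇒ mat ⇒ strings function. A sketch wiring it into a query parameter (the parameter name is illustrative):

    import akka.http.scaladsl.server.Directives._
    import akka.http.scaladsl.unmarshalling.PredefinedFromStringUnmarshallers.CsvSeq

    // ?ids=1,2,3 arrives as Seq(1, 2, 3)
    val route =
      parameter("ids".as(CsvSeq[Int])) { ids ⇒
        complete(s"sum = ${ids.sum}")
      }
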
diff --git a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala
index 0ef8e12940..d295b4bb38 100644
--- a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala
+++ b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala
@@ -104,16 +104,16 @@ object Unmarshaller
* an IllegalStateException will be thrown!
*/
def forContentTypes(ranges: ContentTypeRange*): FromEntityUnmarshaller[A] =
- Unmarshaller.withMaterializer { implicit ec ⇒
- implicit mat ⇒
- entity ⇒
- if (entity.contentType == ContentTypes.NoContentType || ranges.exists(_ matches entity.contentType)) {
- underlying(entity).fast.recover[A](barkAtUnsupportedContentTypeException(ranges, entity.contentType))
- } else FastFuture.failed(UnsupportedContentTypeException(ranges: _*))
+ Unmarshaller.withMaterializer { implicit ec ⇒ implicit mat ⇒
+ entity ⇒
+ if (entity.contentType == ContentTypes.NoContentType || ranges.exists(_ matches entity.contentType)) {
+ underlying(entity).fast.recover[A](barkAtUnsupportedContentTypeException(ranges, entity.contentType))
+ } else FastFuture.failed(UnsupportedContentTypeException(ranges: _*))
}
- private def barkAtUnsupportedContentTypeException(ranges: Seq[ContentTypeRange],
- newContentType: ContentType): PartialFunction[Throwable, Nothing] = {
+ private def barkAtUnsupportedContentTypeException(
+ ranges: Seq[ContentTypeRange],
+ newContentType: ContentType): PartialFunction[Throwable, Nothing] = {
case UnsupportedContentTypeException(supported) ⇒ throw new IllegalStateException(
s"Illegal use of `unmarshaller.forContentTypes($ranges)`: $newContentType is not supported by underlying marshaller!")
}
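
forContentTypes above passes entities whose content type matches one of the given ranges (or is absent) to the underlying unmarshaller and fails fast otherwise. A sketch restricting the predefined String unmarshaller (the media type is illustrative):

    import akka.http.scaladsl.model.MediaTypes.`text/csv`
    import akka.http.scaladsl.unmarshalling.{ FromEntityUnmarshaller, Unmarshaller }

    // anything but text/csv now fails with UnsupportedContentTypeException
    val csvString: FromEntityUnmarshaller[String] =
      Unmarshaller.stringUnmarshaller.forContentTypes(`text/csv`)
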
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
index 7aa6c66ef7..5ac5c688a4 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -4,27 +4,24 @@
package akka.remote.testconductor
import language.postfixOps
-import akka.actor.{ Actor, ActorRef, LoggingFSM, Props, NoSerializationVerificationNeeded }
-import RemoteConnection.getAddrString
-import TestConductorProtocol._
-import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent }
-import scala.concurrent.duration._
-import akka.pattern.ask
-import scala.concurrent.Await
-import akka.event.{ LoggingAdapter, Logging }
-import scala.util.control.NoStackTrace
-import akka.event.LoggingReceive
-import java.net.InetSocketAddress
-import scala.concurrent.Future
-import akka.actor.{ OneForOneStrategy, SupervisorStrategy, Status, Address }
-import java.util.concurrent.ConcurrentHashMap
-import akka.util.{ Timeout }
-import scala.reflect.classTag
-import akka.ConfigurationException
+
+import akka.actor.{ Actor, ActorRef, Address, DeadLetterSuppression, Deploy, FSM, LoggingFSM, NoSerializationVerificationNeeded, OneForOneStrategy, Props, Status, SupervisorStrategy }
import akka.AkkaException
+import akka.ConfigurationException
+import akka.event.LoggingReceive
+import akka.event.{ Logging, LoggingAdapter }
+import akka.pattern.ask
import akka.remote.transport.ThrottlerTransportAdapter.Direction
-import akka.actor.Deploy
-import akka.actor.DeadLetterSuppression
+import akka.util.Timeout
+import java.net.InetSocketAddress
+import java.util.concurrent.ConcurrentHashMap
+import org.jboss.netty.channel.{ Channel, ChannelHandlerContext, ChannelStateEvent, MessageEvent, SimpleChannelUpstreamHandler }
+import RemoteConnection.getAddrString
+import scala.concurrent.Await
+import scala.concurrent.duration._
+import scala.concurrent.Future
+import scala.reflect.classTag
+import scala.util.control.NoStackTrace
/**
* The conductor is the one orchestrating the test: it governs the
@@ -431,7 +428,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP
}
fsm ! ToClient(BarrierResult("initial startup", false))
} else {
- nodes += name -> c
+ nodes += name → c
if (initialParticipants <= 0) fsm ! ToClient(Done)
else if (nodes.size == initialParticipants) {
for (NodeInfo(_, _, client) ← nodes.values) client ! ToClient(Done)
@@ -451,7 +448,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP
case _: FailBarrier ⇒ barrier forward op
case GetAddress(node) ⇒
if (nodes contains node) sender() ! ToClient(AddressReply(node, nodes(node).addr))
- else addrInterest += node -> ((addrInterest get node getOrElse Set()) + sender())
+ else addrInterest += node → ((addrInterest get node getOrElse Set()) + sender())
case _: Done ⇒ //FIXME what should happen?
}
case op: CommandOp ⇒
@@ -523,6 +520,7 @@ private[akka] object BarrierCoordinator {
private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] {
import BarrierCoordinator._
import Controller._
+ import FSM.`→`
// this shall be set to true if all subsequent barriers shall fail
var failed = false
@@ -567,8 +565,8 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor
}
onTransition {
- case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, nextStateData.deadline.timeLeft, false)
- case Waiting -> Idle ⇒ cancelTimer("Timeout")
+ case Idle → Waiting ⇒ setTimer("Timeout", StateTimeout, nextStateData.deadline.timeLeft, false)
+ case Waiting → Idle ⇒ cancelTimer("Timeout")
}
when(Waiting) {
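
Apart from the import reshuffling, the semantic addition in this file is `import FSM.`→``: with -> rewritten to → in the onTransition patterns, the unicode alias of the transition extractor must be in scope. A self-contained sketch of the same pattern (states and messages are illustrative):

    import akka.actor.{ Actor, ActorLogging, FSM }
    import akka.actor.FSM.`→`

    sealed trait State
    case object Idle extends State
    case object Busy extends State

    class Worker extends Actor with ActorLogging with FSM[State, Unit] {
      startWith(Idle, ())
      when(Idle) { case Event("go", _) ⇒ goto(Busy) }
      when(Busy) { case Event("done", _) ⇒ goto(Idle) }
      onTransition {
        case Idle → Busy ⇒ log.info("started")  // the unicode extractor from the import
        case Busy → Idle ⇒ log.info("finished")
      }
      initialize()
    }
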
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala
index 94a046b13c..faa0582c13 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala
@@ -133,7 +133,8 @@ private[akka] class MsgDecoder extends OneToOneDecoder {
case BarrierOp.Succeeded ⇒ BarrierResult(barrier.getName, true)
case BarrierOp.Failed ⇒ BarrierResult(barrier.getName, false)
case BarrierOp.Fail ⇒ FailBarrier(barrier.getName)
- case BarrierOp.Enter ⇒ EnterBarrier(barrier.getName,
+ case BarrierOp.Enter ⇒ EnterBarrier(
+ barrier.getName,
if (barrier.hasTimeout) Option(Duration.fromNanos(barrier.getTimeout)) else None)
}
} else if (w.hasFailure) {
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
index d698c673bc..d2ef4452b8 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
@@ -192,8 +192,8 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress)
case Event(ToServer(msg), d @ Data(Some(channel), None)) ⇒
channel.write(msg)
val token = msg match {
- case EnterBarrier(barrier, timeout) ⇒ Some(barrier -> sender())
- case GetAddress(node) ⇒ Some(node.name -> sender())
+ case EnterBarrier(barrier, timeout) ⇒ Some(barrier → sender())
+ case GetAddress(node) ⇒ Some(node.name → sender())
case _ ⇒ None
}
stay using d.copy(runningOp = token)
@@ -274,13 +274,13 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress)
* INTERNAL API.
*/
private[akka] class PlayerHandler(
- server: InetSocketAddress,
+ server: InetSocketAddress,
private var reconnects: Int,
- backoff: FiniteDuration,
- poolSize: Int,
- fsm: ActorRef,
- log: LoggingAdapter,
- scheduler: Scheduler)(implicit executor: ExecutionContext)
+ backoff: FiniteDuration,
+ poolSize: Int,
+ fsm: ActorRef,
+ log: LoggingAdapter,
+ scheduler: Scheduler)(implicit executor: ExecutionContext)
extends SimpleChannelUpstreamHandler {
import ClientFSM._
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
index aae55a1db7..7bf179e12a 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
@@ -41,7 +41,7 @@ abstract class MultiNodeConfig {
*/
def nodeConfig(roles: RoleName*)(configs: Config*): Unit = {
val c = configs.reduceLeft(_ withFallback _)
- _nodeConf ++= roles map { _ -> c }
+ _nodeConf ++= roles map { _ → c }
}
/**
@@ -78,7 +78,7 @@ abstract class MultiNodeConfig {
}
def deployOn(role: RoleName, deployment: String): Unit =
- _deployments += role -> ((_deployments get role getOrElse Vector()) :+ deployment)
+ _deployments += role → ((_deployments get role getOrElse Vector()) :+ deployment)
def deployOnAll(deployment: String): Unit = _allDeploy :+= deployment
@@ -195,9 +195,9 @@ object MultiNodeSpec {
require(selfIndex >= 0 && selfIndex < maxNodes, "multinode.index is out of bounds: " + selfIndex)
private[testkit] val nodeConfig = mapToConfig(Map(
- "akka.actor.provider" -> "akka.remote.RemoteActorRefProvider",
- "akka.remote.netty.tcp.hostname" -> selfName,
- "akka.remote.netty.tcp.port" -> selfPort))
+ "akka.actor.provider" → "akka.remote.RemoteActorRefProvider",
+ "akka.remote.netty.tcp.hostname" → selfName,
+ "akka.remote.netty.tcp.port" → selfPort))
private[testkit] val baseConfig: Config = ConfigFactory.parseString("""
akka {
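
Note: every `->` to `→` tuple rewrite in this patch is behavior-preserving, because both arrows come from `scala.Predef.ArrowAssoc`, where `→` is defined as an alias of `->` (the alias is deprecated in later Scala versions). A quick sketch:

    // Both arrows build the same Tuple2 via Predef.ArrowAssoc (Scala 2.11/2.12).
    val a = "akka.remote.netty.tcp.port" → 0
    val b = "akka.remote.netty.tcp.port" -> 0
    assert(a == b) // ("akka.remote.netty.tcp.port", 0)
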
diff --git a/akka-parsing/build.sbt b/akka-parsing/build.sbt
index 7703337865..04bfd07854 100644
--- a/akka-parsing/build.sbt
+++ b/akka-parsing/build.sbt
@@ -1,4 +1,5 @@
import akka._
+import com.typesafe.sbt.SbtScalariform.ScalariformKeys
AkkaBuild.defaultSettings
Formatting.docFormatSettings
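
Note: the `ScalariformKeys` import above exposes sbt-scalariform's setting keys; the arrow and alignment rewrites throughout this patch are exactly the kind of changes its preferences drive. A hedged sketch of such a configuration — the preference names are real scalariform options, but this repo's actual values live in its `Formatting` object, not shown here:

    import scalariform.formatter.preferences._

    ScalariformKeys.preferences := ScalariformKeys.preferences.value
      .setPreference(RewriteArrowSymbols, true) // => / -> become ⇒ / →
      .setPreference(AlignParameters, true) // column-align parameter lists
      .setPreference(AlignSingleLineCaseStatements, true)
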
diff --git a/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala b/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala
index 5b454c701c..716b66867d 100644
--- a/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala
+++ b/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala
@@ -34,13 +34,14 @@ import java.lang.{ StringBuilder ⇒ JStringBuilder }
* Set to a value < 0 to disable tab expansion.
* @param traceCutOff the maximum number of (trailing) characters shown for a rule trace
*/
-class ErrorFormatter(showExpected: Boolean = true,
- showPosition: Boolean = true,
- showLine: Boolean = true,
- showTraces: Boolean = false,
- showFrameStartOffset: Boolean = true,
- expandTabs: Int = -1,
- traceCutOff: Int = 120) {
+class ErrorFormatter(
+ showExpected: Boolean = true,
+ showPosition: Boolean = true,
+ showLine: Boolean = true,
+ showTraces: Boolean = false,
+ showFrameStartOffset: Boolean = true,
+ expandTabs: Int = -1,
+ traceCutOff: Int = 120) {
/**
* Formats the given [[ParseError]] into a String using the settings configured for this formatter instance.
@@ -225,8 +226,9 @@ class ErrorFormatter(showExpected: Boolean = true,
/**
* Formats the head element of a [[RuleTrace]] into a String.
*/
- def formatNonTerminal(nonTerminal: RuleTrace.NonTerminal,
- showFrameStartOffset: Boolean = showFrameStartOffset): String = {
+ def formatNonTerminal(
+ nonTerminal: RuleTrace.NonTerminal,
+ showFrameStartOffset: Boolean = showFrameStartOffset): String = {
import RuleTrace._
import CharUtils.escape
val keyString = nonTerminal.key match {
diff --git a/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala b/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala
index 7c62c2f0d7..bbddb1d3c0 100644
--- a/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala
+++ b/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala
@@ -19,9 +19,10 @@ package akka.parboiled2
import scala.annotation.tailrec
import scala.collection.immutable
-case class ParseError(position: Position,
- principalPosition: Position,
- traces: immutable.Seq[RuleTrace]) extends RuntimeException {
+case class ParseError(
+ position: Position,
+ principalPosition: Position,
+ traces: immutable.Seq[RuleTrace]) extends RuntimeException {
require(principalPosition.index >= position.index, "principalPosition must be >= position")
def format(parser: Parser): String = format(parser.input)
def format(parser: Parser, formatter: ErrorFormatter): String = format(parser.input, formatter)
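
Note: combined with the `ErrorFormatter` above, a typical error-reporting flow looks like the following hedged sketch; `MyParser` and its `InputLine` rule are hypothetical:

    import scala.util.{ Failure, Success }

    val parser = new MyParser("a&b") // hypothetical Parser subclass
    parser.InputLine.run() match { // default delivery scheme returns a Try
      case Success(result) ⇒ println(result)
      case Failure(e: ParseError) ⇒
        // format(parser, formatter) delegates to the given ErrorFormatter
        println(e.format(parser, new ErrorFormatter(showTraces = true)))
      case Failure(other) ⇒ throw other
    }
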
diff --git a/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala b/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala
index 5471a30fc9..78959023cd 100644
--- a/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala
+++ b/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala
@@ -23,8 +23,9 @@ import scala.util.control.{ NonFatal, NoStackTrace }
import akka.shapeless._
import akka.parboiled2.support._
-abstract class Parser(initialValueStackSize: Int = 16,
- maxValueStackSize: Int = 1024) extends RuleDSL {
+abstract class Parser(
+ initialValueStackSize: Int = 16,
+ maxValueStackSize: Int = 1024) extends RuleDSL {
import Parser._
require(maxValueStackSize <= 65536, "`maxValueStackSize` > 2^16 is not supported") // due to current snapshot design
@@ -176,7 +177,7 @@ abstract class Parser(initialValueStackSize: Int = 16,
@tailrec
def phase4_collectRuleTraces(reportedErrorIndex: Int, principalErrorIndex: Int, reportQuiet: Boolean)(
- phase3: CollectingRuleTraces = new CollectingRuleTraces(reportedErrorIndex, reportQuiet),
+ phase3: CollectingRuleTraces = new CollectingRuleTraces(reportedErrorIndex, reportQuiet),
traces: VectorBuilder[RuleTrace] = new VectorBuilder): ParseError = {
def done = {
@@ -592,8 +593,8 @@ object Parser {
// or -1 if no atomic rule fails with a mismatch at the principal error index
private class EstablishingReportedErrorIndex(
private var _principalErrorIndex: Int,
- var currentAtomicStart: Int = Int.MinValue,
- var maxAtomicErrorStart: Int = Int.MinValue) extends ErrorAnalysisPhase {
+ var currentAtomicStart: Int = Int.MinValue,
+ var maxAtomicErrorStart: Int = Int.MinValue) extends ErrorAnalysisPhase {
def reportedErrorIndex = if (maxAtomicErrorStart >= 0) maxAtomicErrorStart else _principalErrorIndex
def applyOffset(offset: Int) = {
_principalErrorIndex -= offset
@@ -606,8 +607,8 @@ object Parser {
// in which case we need to report them even though they are marked as "quiet"
private class DetermineReportQuiet(
private var _minErrorIndex: Int, // the smallest index at which a mismatch triggers a StartTracingException
- var inQuiet: Boolean = false // are we currently in a quiet rule?
- ) extends ErrorAnalysisPhase {
+ var inQuiet: Boolean = false // are we currently in a quiet rule?
+ ) extends ErrorAnalysisPhase {
def minErrorIndex = _minErrorIndex
def applyOffset(offset: Int) = _minErrorIndex -= offset
}
@@ -615,11 +616,11 @@ object Parser {
// collect the traces of all mismatches happening at an index >= minErrorIndex (the reported error index)
// by throwing a StartTracingException which gets turned into a TracingBubbleException by the terminal rule
private class CollectingRuleTraces(
- var minErrorIndex: Int, // the smallest index at which a mismatch triggers a StartTracingException
- val reportQuiet: Boolean, // do we need to trace mismatches from quiet rules?
- val traceNr: Int = 0, // the zero-based index number of the RuleTrace we are currently building
- var errorMismatches: Int = 0 // the number of times we have already seen a mismatch at >= minErrorIndex
- ) extends ErrorAnalysisPhase {
+ var minErrorIndex: Int, // the smallest index at which a mismatch triggers a StartTracingException
+ val reportQuiet: Boolean, // do we need to trace mismatches from quiet rules?
+ val traceNr: Int = 0, // the zero-based index number of the RuleTrace we are currently building
+ var errorMismatches: Int = 0 // the number of times we have already seen a mismatch at >= minErrorIndex
+ ) extends ErrorAnalysisPhase {
def applyOffset(offset: Int) = minErrorIndex -= offset
}
}
diff --git a/akka-parsing/src/main/scala/akka/parboiled2/Rule.scala b/akka-parsing/src/main/scala/akka/parboiled2/Rule.scala
index 50bcc68db8..a74936d423 100644
--- a/akka-parsing/src/main/scala/akka/parboiled2/Rule.scala
+++ b/akka-parsing/src/main/scala/akka/parboiled2/Rule.scala
@@ -46,7 +46,8 @@ sealed class Rule[-I <: HList, +O <: HList] extends RuleX {
* Rule[A, B:C] ~ Rule[D:B:C, E:F] = Rule[D:A, E:F]
*/
@compileTimeOnly("Calls to `~` must be inside `rule` macro")
- def ~[I2 <: HList, O2 <: HList](that: Rule[I2, O2])(implicit i: TailSwitch[I2, O @uncheckedVariance, I @uncheckedVariance],
+ def ~[I2 <: HList, O2 <: HList](that: Rule[I2, O2])(implicit
+ i: TailSwitch[I2, O @uncheckedVariance, I @uncheckedVariance],
o: TailSwitch[O @uncheckedVariance, I2, O2]): Rule[i.Out, o.Out] = `n/a`
/**
@@ -54,7 +55,8 @@ sealed class Rule[-I <: HList, +O <: HList] extends RuleX {
* If the rule being concatenated doesn't match, a parse error will be triggered immediately.
*/
@compileTimeOnly("Calls to `~!~` must be inside `rule` macro")
- def ~!~[I2 <: HList, O2 <: HList](that: Rule[I2, O2])(implicit i: TailSwitch[I2, O @uncheckedVariance, I @uncheckedVariance],
+ def ~!~[I2 <: HList, O2 <: HList](that: Rule[I2, O2])(implicit
+ i: TailSwitch[I2, O @uncheckedVariance, I @uncheckedVariance],
o: TailSwitch[O @uncheckedVariance, I2, O2]): Rule[i.Out, o.Out] = `n/a`
/**
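
Note: a hedged sketch contrasting the two concatenation operators documented above; `~` allows backtracking on mismatch, while `~!~` turns a mismatch of its right side into an immediate parse error. The `KV` parser is hypothetical:

    import akka.parboiled2._

    class KV(val input: ParserInput) extends Parser {
      // after "=" has matched, a malformed Value fails the parse outright
      def Pair = rule { Key ~ "=" ~!~ Value ~ EOI }
      def Key = rule { capture(oneOrMore(CharPredicate.AlphaNum)) }
      def Value = rule { capture(oneOrMore(CharPredicate.AlphaNum)) }
    }
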
diff --git a/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala b/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala
index a9f9dfc95c..168ec7d503 100644
--- a/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala
+++ b/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala
@@ -397,13 +397,13 @@ trait OpTreeContext[OpTreeCtx <: ParserMacros.ParserContext] {
if (i <= 0) c.abort(base.pos, "`x` in `x.times` must be positive")
else if (i == 1) rule
else Times(rule, q"val min, max = $n", collector, separator)
- case x@(Ident(_) | Select(_, _)) ⇒ Times(rule, q"val min = $n; val max = min", collector, separator)
- case _ ⇒ c.abort(n.pos, "Invalid int base expression for `.times(...)`: " + n)
+ case x @ (Ident(_) | Select(_, _)) ⇒ Times(rule, q"val min = $n; val max = min", collector, separator)
+ case _ ⇒ c.abort(n.pos, "Invalid int base expression for `.times(...)`: " + n)
}
case q"$a.this.range2NTimes($r)" ⇒ r match {
- case q"scala.Predef.intWrapper($mn).to($mx)" ⇒ handleRange(mn, mx, r) // Scala 2.12
+ case q"scala.Predef.intWrapper($mn).to($mx)" ⇒ handleRange(mn, mx, r) // Scala 2.12
case q"scala.this.Predef.intWrapper($mn).to($mx)" ⇒ handleRange(mn, mx, r) // Scala 2.11
- case x@(Ident(_) | Select(_, _)) ⇒
+ case x @ (Ident(_) | Select(_, _)) ⇒
Times(rule, q"val r = $r; val min = r.start; val max = r.end", collector, separator)
case _ ⇒ c.abort(r.pos, "Invalid range base expression for `.times(...)`: " + r)
}
@@ -689,11 +689,11 @@ trait OpTreeContext[OpTreeCtx <: ParserMacros.ParserContext] {
/////////////////////////////////// helpers ////////////////////////////////////
class Collector(
- val valBuilder: Tree,
- val popToBuilder: Tree,
+ val valBuilder: Tree,
+ val popToBuilder: Tree,
val pushBuilderResult: Tree,
- val pushSomePop: Tree,
- val pushNone: Tree)
+ val pushSomePop: Tree,
+ val pushNone: Tree)
lazy val rule0Collector = {
val unit = q"()"
diff --git a/akka-parsing/src/main/scala/akka/shapeless/hlists.scala b/akka-parsing/src/main/scala/akka/shapeless/hlists.scala
index e14c8be049..0353e89c35 100644
--- a/akka-parsing/src/main/scala/akka/shapeless/hlists.scala
+++ b/akka-parsing/src/main/scala/akka/shapeless/hlists.scala
@@ -16,7 +16,6 @@
package akka.shapeless
-
/**
* `HList` ADT base trait.
*
diff --git a/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala b/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala
index 6f2b677348..2b12cef49a 100644
--- a/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala
+++ b/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala
@@ -17,7 +17,6 @@
package akka.shapeless
package ops
-
object hlist {
/**
* Type class witnessing that this `HList` is composite and providing access to head and tail.
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala
index b6b17067c9..67ac0c15ae 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala
@@ -8,7 +8,7 @@ package akka.persistence.query
* [[akka.persistence.query.scaladsl.EventsByTagQuery]] query, or similar queries.
*/
final case class EventEnvelope(
- offset: Long,
+ offset: Long,
persistenceId: String,
- sequenceNr: Long,
- event: Any)
+ sequenceNr: Long,
+ event: Any)
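
Note: `EventEnvelope` is the element type emitted by the leveldb query publishers further down in this patch. A hedged consumption sketch; the journal identifier is the real leveldb one, while the tag and system name are made up:

    import akka.actor.ActorSystem
    import akka.persistence.query.PersistenceQuery
    import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
    import akka.stream.ActorMaterializer

    implicit val system = ActorSystem("queries")
    implicit val mat = ActorMaterializer()

    val readJournal =
      PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)
    readJournal.eventsByTag("my-tag", offset = 0L) // Source of EventEnvelope elements
      .runForeach(env ⇒ println(s"${env.persistenceId}/${env.sequenceNr}: ${env.event}"))
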
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala
index ed86bfb7c6..059dc8f329 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala
@@ -71,7 +71,8 @@ class PersistenceQuery(system: ExtendedActorSystem) extends Extension {
}
private def createPlugin(configPath: String): ReadJournalProvider = {
- require(!isEmpty(configPath) && system.settings.config.hasPath(configPath),
+ require(
+ !isEmpty(configPath) && system.settings.config.hasPath(configPath),
s"'reference.conf' is missing persistence read journal plugin config path: '${configPath}'")
val pluginConfig = system.settings.config.getConfig(configPath)
val pluginClassName = pluginConfig.getString("class")
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala
index 63fa0f6312..e5215658e6 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala
@@ -123,7 +123,7 @@ private[akka] abstract class AbstractEventsByPersistenceIdPublisher(
private[akka] class LiveEventsByPersistenceIdPublisher(
persistenceId: String, fromSequenceNr: Long, override val toSequenceNr: Long,
refreshInterval: FiniteDuration,
- maxBufSize: Int, writeJournalPluginId: String)
+ maxBufSize: Int, writeJournalPluginId: String)
extends AbstractEventsByPersistenceIdPublisher(
persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) {
import EventsByPersistenceIdPublisher._
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala
index 49903e5282..4f2d41d41b 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala
@@ -125,7 +125,7 @@ private[akka] abstract class AbstractEventsByTagPublisher(
private[akka] class LiveEventsByTagPublisher(
tag: String, fromOffset: Long, override val toOffset: Long,
refreshInterval: FiniteDuration,
- maxBufSize: Int, writeJournalPluginId: String)
+ maxBufSize: Int, writeJournalPluginId: String)
extends AbstractEventsByTagPublisher(
tag, fromOffset, maxBufSize, writeJournalPluginId) {
import EventsByTagPublisher._
diff --git a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala
index 399297bb6a..b6745c6818 100644
--- a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala
@@ -308,7 +308,8 @@ trait AtLeastOnceDeliveryLike extends Eventsourced {
* as a blob in your custom snapshot.
*/
def getDeliverySnapshot: AtLeastOnceDeliverySnapshot =
- AtLeastOnceDeliverySnapshot(deliverySequenceNr,
+ AtLeastOnceDeliverySnapshot(
+ deliverySequenceNr,
unconfirmed.map { case (deliveryId, d) ⇒ UnconfirmedDelivery(deliveryId, d.destination, d.message) }(breakOut))
/**
@@ -319,7 +320,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced {
deliverySequenceNr = snapshot.currentDeliveryId
val now = System.nanoTime()
unconfirmed = snapshot.unconfirmedDeliveries.map(d ⇒
- d.deliveryId -> Delivery(d.destination, d.message, now, 0))(breakOut)
+ d.deliveryId → Delivery(d.destination, d.message, now, 0))(breakOut)
}
/**
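
Note: `getDeliverySnapshot` pairs with the restore path shown in the hunk above (`setDeliverySnapshot`). A hedged sketch of the documented save/restore pattern; the `MySnap` wrapper and `Forwarder` actor are hypothetical:

    import akka.persistence._
    import akka.persistence.AtLeastOnceDelivery.AtLeastOnceDeliverySnapshot

    case class MySnap(delivery: AtLeastOnceDeliverySnapshot) // the "blob" mentioned above

    class Forwarder extends PersistentActor with AtLeastOnceDelivery {
      override def persistenceId = "forwarder"
      override def receiveCommand: Receive = {
        case "snap" ⇒ saveSnapshot(MySnap(getDeliverySnapshot))
      }
      override def receiveRecover: Receive = {
        case SnapshotOffer(_, MySnap(d)) ⇒ setDeliverySnapshot(d)
      }
    }
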
diff --git a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala
index 72c0067e14..6b8c1d1b20 100644
--- a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala
@@ -145,7 +145,8 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas
* @param event the event that was to be persisted
*/
protected def onPersistRejected(cause: Throwable, event: Any, seqNr: Long): Unit = {
- log.warning("Rejected to persist event type [{}] with sequence number [{}] for persistenceId [{}] due to [{}].",
+ log.warning(
+ "Rejected to persist event type [{}] with sequence number [{}] for persistenceId [{}] due to [{}].",
event.getClass.getName, seqNr, persistenceId, cause.getMessage)
}
@@ -229,7 +230,8 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas
case DeleteSnapshotsFailure(c, e) ⇒
log.warning("Failed to deleteSnapshots given criteria [{}] due to: [{}: {}]", c, e.getClass.getCanonicalName, e.getMessage)
case DeleteMessagesFailure(e, toSequenceNr) ⇒
- log.warning("Failed to deleteMessages toSequenceNr [{}] for persistenceId [{}] due to [{}: {}].",
+ log.warning(
+ "Failed to deleteMessages toSequenceNr [{}] for persistenceId [{}] due to [{}: {}].",
toSequenceNr, persistenceId, e.getClass.getCanonicalName, e.getMessage)
case m ⇒ super.unhandled(m)
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala
index 7cd3d31a33..c8dd4b7e49 100644
--- a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala
@@ -305,7 +305,8 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
private class PluginHolderExtensionId(configPath: String, fallbackPath: String) extends ExtensionId[PluginHolder] {
override def createExtension(system: ExtendedActorSystem): PluginHolder = {
- require(!isEmpty(configPath) && system.settings.config.hasPath(configPath),
+ require(
+ !isEmpty(configPath) && system.settings.config.hasPath(configPath),
s"'reference.conf' is missing persistence plugin config path: '$configPath'")
val config: Config = system.settings.config.getConfig(configPath)
.withFallback(system.settings.config.getConfig(fallbackPath))
diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala
index 2e1ed023b1..b50cd80e47 100644
--- a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala
@@ -42,7 +42,8 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per
case l: List[PersistentRepr] ⇒ l.tail.nonEmpty // avoids calling .size
case v: Vector[PersistentRepr] ⇒ v.size > 1
case _ ⇒ true // some other collection type, let's just check
- }) require(payload.forall(_.persistenceId == payload.head.persistenceId),
+ }) require(
+ payload.forall(_.persistenceId == payload.head.persistenceId),
"AtomicWrite must contain messages for the same persistenceId, " +
s"yet different persistenceIds found: ${payload.map(_.persistenceId).toSet}")
@@ -115,11 +116,11 @@ trait PersistentRepr extends Message {
* Creates a new copy of this [[PersistentRepr]].
*/
def update(
- sequenceNr: Long = sequenceNr,
- persistenceId: String = persistenceId,
- deleted: Boolean = deleted,
- sender: ActorRef = sender,
- writerUuid: String = writerUuid): PersistentRepr
+ sequenceNr: Long = sequenceNr,
+ persistenceId: String = persistenceId,
+ deleted: Boolean = deleted,
+ sender: ActorRef = sender,
+ writerUuid: String = writerUuid): PersistentRepr
}
object PersistentRepr {
@@ -132,13 +133,13 @@ object PersistentRepr {
* Plugin API.
*/
def apply(
- payload: Any,
- sequenceNr: Long = 0L,
- persistenceId: String = PersistentRepr.Undefined,
- manifest: String = PersistentRepr.Undefined,
- deleted: Boolean = false,
- sender: ActorRef = null,
- writerUuid: String = PersistentRepr.Undefined): PersistentRepr =
+ payload: Any,
+ sequenceNr: Long = 0L,
+ persistenceId: String = PersistentRepr.Undefined,
+ manifest: String = PersistentRepr.Undefined,
+ deleted: Boolean = false,
+ sender: ActorRef = null,
+ writerUuid: String = PersistentRepr.Undefined): PersistentRepr =
PersistentImpl(payload, sequenceNr, persistenceId, manifest, deleted, sender, writerUuid)
/**
@@ -157,13 +158,13 @@ object PersistentRepr {
* INTERNAL API.
*/
private[persistence] final case class PersistentImpl(
- override val payload: Any,
- override val sequenceNr: Long,
+ override val payload: Any,
+ override val sequenceNr: Long,
override val persistenceId: String,
- override val manifest: String,
- override val deleted: Boolean,
- override val sender: ActorRef,
- override val writerUuid: String) extends PersistentRepr with NoSerializationVerificationNeeded {
+ override val manifest: String,
+ override val deleted: Boolean,
+ override val sender: ActorRef,
+ override val writerUuid: String) extends PersistentRepr with NoSerializationVerificationNeeded {
def withPayload(payload: Any): PersistentRepr =
copy(payload = payload)
diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala
index cccb635c4d..b95689458b 100644
--- a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala
@@ -52,8 +52,8 @@ final case class DeleteMessagesFailure(cause: Throwable, toSequenceNr: Long)
@SerialVersionUID(1L)
final case class Recovery(
fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest,
- toSequenceNr: Long = Long.MaxValue,
- replayMax: Long = Long.MaxValue)
+ toSequenceNr: Long = Long.MaxValue,
+ replayMax: Long = Long.MaxValue)
object Recovery {
diff --git a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala
index 8ab09fb0f4..8a489dbd33 100644
--- a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala
@@ -107,9 +107,9 @@ final case class SnapshotOffer(metadata: SnapshotMetadata, snapshot: Any)
@SerialVersionUID(1L)
final case class SnapshotSelectionCriteria(
maxSequenceNr: Long = Long.MaxValue,
- maxTimestamp: Long = Long.MaxValue,
+ maxTimestamp: Long = Long.MaxValue,
minSequenceNr: Long = 0L,
- minTimestamp: Long = 0L) {
+ minTimestamp: Long = 0L) {
/**
* INTERNAL API.
diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala
index c2cef062e8..6d8226cd54 100644
--- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala
@@ -49,8 +49,8 @@ trait PersistentFSM[S <: FSMState, D, E] extends PersistentActor with Persistent
lazy val statesMap: Map[String, S] = stateNames.map(name ⇒ (name.identifier, name)).toMap
/**
- * Timeout set for the current state. Used when saving a snapshot
- */
+ * Timeout set for the current state. Used when saving a snapshot
+ */
private var currentStateTimeout: Option[FiniteDuration] = None
/**
@@ -68,8 +68,8 @@ trait PersistentFSM[S <: FSMState, D, E] extends PersistentActor with Persistent
def onRecoveryCompleted(): Unit = {}
/**
- * Save the current state as a snapshot
- */
+ * Save the current state as a snapshot
+ */
final def saveStateSnapshot(): Unit = {
saveSnapshot(PersistentFSMSnapshot(stateName.identifier, stateData, currentStateTimeout))
}
@@ -85,9 +85,9 @@ trait PersistentFSM[S <: FSMState, D, E] extends PersistentActor with Persistent
* Discover the latest recorded state
*/
override def receiveRecover: Receive = {
- case domainEventTag(event) ⇒ startWith(stateName, applyEvent(event, stateData))
+ case domainEventTag(event) ⇒ startWith(stateName, applyEvent(event, stateData))
case StateChangeEvent(stateIdentifier, timeout) ⇒ startWith(statesMap(stateIdentifier), stateData, timeout)
- case SnapshotOffer(_, PersistentFSMSnapshot(stateIdentifier, data: D, timeout)) => startWith(statesMap(stateIdentifier), data, timeout)
+ case SnapshotOffer(_, PersistentFSMSnapshot(stateIdentifier, data: D, timeout)) ⇒ startWith(statesMap(stateIdentifier), data, timeout)
case RecoveryCompleted ⇒
initialize()
onRecoveryCompleted()
@@ -147,13 +147,13 @@ object PersistentFSM {
private[persistence] case class StateChangeEvent(stateIdentifier: String, timeout: Option[FiniteDuration]) extends PersistentFsmEvent
/**
- * FSM state and data snapshot
- *
- * @param stateIdentifier FSM state identifier
- * @param data FSM state data
- * @param timeout FSM state timeout
- * @tparam D state data type
- */
+ * FSM state and data snapshot
+ *
+ * @param stateIdentifier FSM state identifier
+ * @param data FSM state data
+ * @param timeout FSM state timeout
+ * @tparam D state data type
+ */
private[persistence] case class PersistentFSMSnapshot[D](stateIdentifier: String, data: D, timeout: Option[FiniteDuration]) extends Message
/**
@@ -259,9 +259,10 @@ object PersistentFSM {
* This extractor is just a convenience for matching a (S, S) pair, including a
* reminder of what the new state is.
*/
- object -> {
+ object `->` {
def unapply[S](in: (S, S)) = Some(in)
}
+ val `→` = `->`
/**
* Log Entry of the [[akka.actor.LoggingFSM]], can be obtained by calling `getLog`.
@@ -275,13 +276,13 @@ object PersistentFSM {
* to be executed after FSM moves to the new state (also triggered when staying in the same state)
*/
final case class State[S, D, E](
- stateName: S,
- stateData: D,
- timeout: Option[FiniteDuration] = None,
- stopReason: Option[Reason] = None,
- replies: List[Any] = Nil,
- domainEvents: Seq[E] = Nil,
- afterTransitionDo: D ⇒ Unit = { _: D ⇒ })(private[akka] val notifies: Boolean = true) {
+ stateName: S,
+ stateData: D,
+ timeout: Option[FiniteDuration] = None,
+ stopReason: Option[Reason] = None,
+ replies: List[Any] = Nil,
+ domainEvents: Seq[E] = Nil,
+ afterTransitionDo: D ⇒ Unit = { _: D ⇒ })(private[akka] val notifies: Boolean = true) {
/**
* Copy object and update values if needed.
diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala
index ad30835e5e..9994e95154 100644
--- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala
@@ -113,7 +113,7 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging
* This extractor is just a convenience for matching a (S, S) pair, including a
* reminder of what the new state is.
*/
- val -> = PersistentFSM.->
+ val `->` = PersistentFSM.`->`
/**
* This case object is received in case of a state timeout.
@@ -669,9 +669,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D
* @param stateTimeout default state timeout for this state
* @param stateFunctionBuilder partial function builder describing response to input
*/
- final def when(stateName: S,
- stateTimeout: FiniteDuration,
- stateFunctionBuilder: FSMStateFunctionBuilder[S, D, E]): Unit =
+ final def when(
+ stateName: S,
+ stateTimeout: FiniteDuration,
+ stateFunctionBuilder: FSMStateFunctionBuilder[S, D, E]): Unit =
when(stateName, stateTimeout)(stateFunctionBuilder.build())
/**
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala
index 417d20f565..6623d6d2d1 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala
@@ -281,7 +281,7 @@ private[persistence] object AsyncWriteJournal {
delivered = d.snr
d.target.tell(d.msg, d.sender)
} else {
- delayed += (d.snr -> d)
+ delayed += (d.snr → d)
}
val ro = delayed.remove(delivered + 1)
if (ro.isDefined) resequence(ro.get)
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala
index a5e237b1cb..847bbfcd58 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala
@@ -20,9 +20,9 @@ import scala.util.Try
* `EventAdapters` serves as a per-journal collection of bound event adapters.
*/
class EventAdapters(
- map: ConcurrentHashMap[Class[_], EventAdapter],
+ map: ConcurrentHashMap[Class[_], EventAdapter],
bindings: immutable.Seq[(Class[_], EventAdapter)],
- log: LoggingAdapter) {
+ log: LoggingAdapter) {
/**
* Finds the "most specific" matching adapter for the given class (i.e. it may return an adapter that can work on a
@@ -71,20 +71,21 @@ private[akka] object EventAdapters {
}
private def apply(
- system: ExtendedActorSystem,
- adapters: Map[Name, FQN],
+ system: ExtendedActorSystem,
+ adapters: Map[Name, FQN],
adapterBindings: Map[FQN, BoundAdapters]): EventAdapters = {
val adapterNames = adapters.keys.toSet
for {
(fqn, boundToAdapters) ← adapterBindings
boundAdapter ← boundToAdapters
- } require(adapterNames(boundAdapter.toString),
+ } require(
+ adapterNames(boundAdapter.toString),
s"$fqn was bound to undefined event-adapter: $boundAdapter (bindings: ${boundToAdapters.mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})")
// A Map of handlers from alias to implementation (i.e. class implementing akka.serialization.Serializer)
// For example this defines a handler named 'country': `"country" -> com.example.domain.CountryTagsAdapter`
- val handlers = for ((k: String, v: String) ← adapters) yield k -> instantiateAdapter(v, system).get
+ val handlers = for ((k: String, v: String) ← adapters) yield k → instantiateAdapter(v, system).get
// bindings is a Seq of tuples representing the mapping from Class to handler.
// It is ordered primarily by the most specific classes first, and secondarily by the configured order.
@@ -131,7 +132,7 @@ private[akka] object EventAdapters {
* loading is performed by the system’s [[akka.actor.DynamicAccess]].
*/
private def instantiate[T: ClassTag](fqn: FQN, system: ExtendedActorSystem): Try[T] =
- system.dynamicAccess.createInstanceFor[T](fqn, List(classOf[ExtendedActorSystem] -> system)) recoverWith {
+ system.dynamicAccess.createInstanceFor[T](fqn, List(classOf[ExtendedActorSystem] → system)) recoverWith {
case _: NoSuchMethodException ⇒ system.dynamicAccess.createInstanceFor[T](fqn, Nil)
}
@@ -151,7 +152,7 @@ private[akka] object EventAdapters {
private final def configToMap(config: Config, path: String): Map[String, String] = {
import scala.collection.JavaConverters._
if (config.hasPath(path)) {
- config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ k -> v.toString }
+ config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ k → v.toString }
} else Map.empty
}
@@ -159,8 +160,8 @@ private[akka] object EventAdapters {
import scala.collection.JavaConverters._
if (config.hasPath(path)) {
config.getConfig(path).root.unwrapped.asScala.toMap map {
- case (k, v: util.ArrayList[_]) if v.isInstanceOf[util.ArrayList[_]] ⇒ k -> v.asScala.map(_.toString).toList
- case (k, v) ⇒ k -> List(v.toString)
+ case (k, v: util.ArrayList[_]) if v.isInstanceOf[util.ArrayList[_]] ⇒ k → v.asScala.map(_.toString).toList
+ case (k, v) ⇒ k → List(v.toString)
}
} else Map.empty
}
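
Note: the adapters that `EventAdapters` binds implement `akka.persistence.journal.EventAdapter`. A hedged minimal implementation — the class name is borrowed from the example comment above; the pass-through behavior is hypothetical:

    import akka.persistence.journal.{ EventAdapter, EventSeq }

    class CountryTagsAdapter extends EventAdapter {
      override def manifest(event: Any): String = "" // no manifest needed here
      override def toJournal(event: Any): Any = event // could wrap in Tagged(event, tags)
      override def fromJournal(event: Any, manifest: String): EventSeq =
        EventSeq.single(event)
    }
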
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala
index 87e6e68467..4df0bb0d4f 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala
@@ -20,10 +20,10 @@ import scala.collection.mutable.LinkedHashSet
private[akka] object ReplayFilter {
def props(
persistentActor: ActorRef,
- mode: Mode,
- windowSize: Int,
- maxOldWriters: Int,
- debugEnabled: Boolean): Props = {
+ mode: Mode,
+ windowSize: Int,
+ maxOldWriters: Int,
+ debugEnabled: Boolean): Props = {
require(windowSize > 0, "windowSize must be > 0")
require(maxOldWriters > 0, "maxOldWriters must be > 0")
require(mode != Disabled, "mode must not be Disabled")
@@ -33,9 +33,9 @@ private[akka] object ReplayFilter {
// for binary compatibility
def props(
persistentActor: ActorRef,
- mode: Mode,
- windowSize: Int,
- maxOldWriters: Int): Props = props(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false)
+ mode: Mode,
+ windowSize: Int,
+ maxOldWriters: Int): Props = props(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false)
sealed trait Mode
case object Fail extends Mode
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala
index 771c860141..e52b90228a 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala
@@ -54,17 +54,17 @@ private[persistence] trait InmemMessages {
var messages = Map.empty[String, Vector[PersistentRepr]]
def add(p: PersistentRepr): Unit = messages = messages + (messages.get(p.persistenceId) match {
- case Some(ms) ⇒ p.persistenceId -> (ms :+ p)
- case None ⇒ p.persistenceId -> Vector(p)
+ case Some(ms) ⇒ p.persistenceId → (ms :+ p)
+ case None ⇒ p.persistenceId → Vector(p)
})
def update(pid: String, snr: Long)(f: PersistentRepr ⇒ PersistentRepr): Unit = messages = messages.get(pid) match {
- case Some(ms) ⇒ messages + (pid -> ms.map(sp ⇒ if (sp.sequenceNr == snr) f(sp) else sp))
+ case Some(ms) ⇒ messages + (pid → ms.map(sp ⇒ if (sp.sequenceNr == snr) f(sp) else sp))
case None ⇒ messages
}
def delete(pid: String, snr: Long): Unit = messages = messages.get(pid) match {
- case Some(ms) ⇒ messages + (pid -> ms.filterNot(_.sequenceNr == snr))
+ case Some(ms) ⇒ messages + (pid → ms.filterNot(_.sequenceNr == snr))
case None ⇒ messages
}
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala
index a0b36cf306..b63ada5a8b 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala
@@ -55,13 +55,13 @@ private[persistence] trait LeveldbIdMapping extends Actor { this: LeveldbStore
val nextKey = keyFromBytes(nextEntry.getKey)
if (!isMappingKey(nextKey)) pathMap else {
val nextVal = new String(nextEntry.getValue, UTF_8)
- readIdMap(pathMap + (nextVal -> nextKey.mappingId), iter)
+ readIdMap(pathMap + (nextVal → nextKey.mappingId), iter)
}
}
}
private def writeIdMapping(id: String, numericId: Int): Int = {
- idMap = idMap + (id -> numericId)
+ idMap = idMap + (id → numericId)
leveldb.put(keyToBytes(mappingKey(numericId)), id.getBytes(UTF_8))
newPersistenceIdAdded(id)
numericId
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala
index 29cfdd43de..0db740c440 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala
@@ -12,8 +12,8 @@ import java.nio.ByteBuffer
*/
private[leveldb] final case class Key(
persistenceId: Int,
- sequenceNr: Long,
- mappingId: Int)
+ sequenceNr: Long,
+ mappingId: Int)
private[leveldb] object Key {
def keyToBytes(key: Key): Array[Byte] = {
diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala
index c51a6c8463..6eb1931a9d 100644
--- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala
+++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala
@@ -64,7 +64,8 @@ private[persistence] trait LeveldbStore extends Actor with WriteJournalBase with
if (tags.nonEmpty && hasTagSubscribers)
allTags = allTags union tags
- require(!p2.persistenceId.startsWith(tagPersistenceIdPrefix),
+ require(
+ !p2.persistenceId.startsWith(tagPersistenceIdPrefix),
s"persistenceId [${p.persistenceId}] must not start with $tagPersistenceIdPrefix")
addToMessageBatch(p2, tags, batch)
}
diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala
index af56ffa077..9d8d936816 100644
--- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala
@@ -31,19 +31,20 @@ object AtLeastOnceDeliverySpec {
def senderProps(testActor: ActorRef, name: String,
redeliverInterval: FiniteDuration, warnAfterNumberOfUnconfirmedAttempts: Int,
redeliveryBurstLimit: Int,
- destinations: Map[String, ActorPath],
- async: Boolean, actorSelectionDelivery: Boolean = false): Props =
+ destinations: Map[String, ActorPath],
+ async: Boolean, actorSelectionDelivery: Boolean = false): Props =
Props(new Sender(testActor, name, redeliverInterval, warnAfterNumberOfUnconfirmedAttempts,
redeliveryBurstLimit, destinations, async, actorSelectionDelivery))
- class Sender(testActor: ActorRef,
- name: String,
- override val redeliverInterval: FiniteDuration,
- override val warnAfterNumberOfUnconfirmedAttempts: Int,
- override val redeliveryBurstLimit: Int,
- destinations: Map[String, ActorPath],
- async: Boolean,
- actorSelectionDelivery: Boolean)
+ class Sender(
+ testActor: ActorRef,
+ name: String,
+ override val redeliverInterval: FiniteDuration,
+ override val warnAfterNumberOfUnconfirmedAttempts: Int,
+ override val redeliveryBurstLimit: Int,
+ destinations: Map[String, ActorPath],
+ async: Boolean,
+ actorSelectionDelivery: Boolean)
extends PersistentActor with AtLeastOnceDelivery with ActorLogging {
override def persistenceId: String = name
@@ -179,7 +180,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
s"deliver messages in order when nothing is lost (using actorSelection: $deliverUsingActorSelection)" taggedAs (TimingTest) in {
val probe = TestProbe()
val probeA = TestProbe()
- val destinations = Map("A" -> system.actorOf(destinationProps(probeA.ref)).path)
+ val destinations = Map("A" → system.actorOf(destinationProps(probeA.ref)).path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false), name)
snd.tell(Req("a"), probe.ref)
probe.expectMsg(ReqAck)
@@ -191,7 +192,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val probe = TestProbe()
val probeA = TestProbe()
val dst = system.actorOf(destinationProps(probeA.ref))
- val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path)
+ val destinations = Map("A" → system.actorOf(unreliableProps(3, dst)).path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false, actorSelectionDelivery = deliverUsingActorSelection), name)
snd.tell(Req("a-1"), probe.ref)
probe.expectMsg(ReqAck)
@@ -222,7 +223,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val probe = TestProbe()
val probeA = TestProbe()
val dst = system.actorOf(destinationProps(probeA.ref))
- val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path)
+ val destinations = Map("A" → system.actorOf(unreliableProps(3, dst)).path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false), name)
snd.tell(Req("a-1"), probe.ref)
probe.expectMsg(ReqAck)
@@ -256,7 +257,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val probe = TestProbe()
val probeA = TestProbe()
val dst = system.actorOf(destinationProps(probeA.ref))
- val destinations = Map("A" -> system.actorOf(unreliableProps(2, dst)).path)
+ val destinations = Map("A" → system.actorOf(unreliableProps(2, dst)).path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false), name)
snd.tell(Req("a-1"), probe.ref)
probe.expectMsg(ReqAck)
@@ -293,7 +294,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val probe = TestProbe()
val probeA = TestProbe()
val dst = system.actorOf(destinationProps(probeA.ref))
- val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path)
+ val destinations = Map("A" → system.actorOf(unreliableProps(3, dst)).path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false), name)
snd.tell(Req("a-1"), probe.ref)
probe.expectMsg(ReqAck)
@@ -331,7 +332,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val probe = TestProbe()
val probeA = TestProbe()
val probeB = TestProbe()
- val destinations = Map("A" -> probeA.ref.path, "B" -> probeB.ref.path)
+ val destinations = Map("A" → probeA.ref.path, "B" → probeB.ref.path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 3, 1000, destinations, async = false), name)
snd.tell(Req("a-1"), probe.ref)
snd.tell(Req("b-1"), probe.ref)
@@ -356,9 +357,9 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val dstB = system.actorOf(destinationProps(probeB.ref), "destination-b")
val dstC = system.actorOf(destinationProps(probeC.ref), "destination-c")
val destinations = Map(
- "A" -> system.actorOf(unreliableProps(2, dstA), "unreliable-a").path,
- "B" -> system.actorOf(unreliableProps(5, dstB), "unreliable-b").path,
- "C" -> system.actorOf(unreliableProps(3, dstC), "unreliable-c").path)
+ "A" → system.actorOf(unreliableProps(2, dstA), "unreliable-a").path,
+ "B" → system.actorOf(unreliableProps(5, dstB), "unreliable-b").path,
+ "C" → system.actorOf(unreliableProps(3, dstC), "unreliable-c").path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = true), name)
val N = 100
for (n ← 1 to N) {
@@ -380,7 +381,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c
val probe = TestProbe()
val probeA = TestProbe()
val dst = system.actorOf(destinationProps(probeA.ref))
- val destinations = Map("A" -> system.actorOf(unreliableProps(2, dst)).path)
+ val destinations = Map("A" → system.actorOf(unreliableProps(2, dst)).path)
val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 2, destinations, async = true), name)
diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala
index 464bb01536..d99191744b 100644
--- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala
@@ -29,7 +29,7 @@ object PersistentActorStashingSpec {
case "boom" ⇒ throw new TestException("boom")
case GetState ⇒ sender() ! events.reverse
}
-
+
def unstashBehavior: Receive
def receiveRecover = updateState
@@ -37,27 +37,28 @@ object PersistentActorStashingSpec {
class UserStashPersistentActor(name: String) extends StashExamplePersistentActor(name) {
var stashed = false
-
+
val receiveCommand: Receive = unstashBehavior orElse {
- case Cmd("a") if !stashed ⇒ stash(); stashed = true
- case Cmd("a") ⇒ sender() ! "a"
- case Cmd("b") ⇒ persist(Evt("b"))(evt ⇒ sender() ! evt.data)
+ case Cmd("a") if !stashed ⇒
+ stash(); stashed = true
+ case Cmd("a") ⇒ sender() ! "a"
+ case Cmd("b") ⇒ persist(Evt("b"))(evt ⇒ sender() ! evt.data)
}
-
+
def unstashBehavior: Receive = {
- case Cmd("c") ⇒ unstashAll(); sender () ! "c"
+ case Cmd("c") ⇒ unstashAll(); sender() ! "c"
}
}
-
+
class UserStashWithinHandlerPersistentActor(name: String) extends UserStashPersistentActor(name: String) {
override def unstashBehavior: Receive = {
- case Cmd("c") ⇒ persist(Evt("c")) { evt ⇒ sender() ! evt.data; unstashAll() }
+ case Cmd("c") ⇒ persist(Evt("c")) { evt ⇒ sender() ! evt.data; unstashAll() }
}
}
class UserStashManyPersistentActor(name: String) extends StashExamplePersistentActor(name) {
val receiveCommand: Receive = commonBehavior orElse {
- case Cmd("a") ⇒ persist(Evt("a")) { evt ⇒
+ case Cmd("a") ⇒ persist(Evt("a")) { evt ⇒
updateState(evt)
context.become(processC)
}
@@ -68,14 +69,14 @@ object PersistentActorStashingSpec {
val processC: Receive = unstashBehavior orElse {
case other ⇒ stash()
}
-
+
def unstashBehavior: Receive = {
case Cmd("c") ⇒
persist(Evt("c")) { evt ⇒ updateState(evt); context.unbecome() }
unstashAll()
}
}
-
+
class UserStashWithinHandlerManyPersistentActor(name: String) extends UserStashManyPersistentActor(name) {
override def unstashBehavior: Receive = {
case Cmd("c") ⇒ persist(Evt("c")) { evt ⇒ updateState(evt); context.unbecome(); unstashAll() }
@@ -95,7 +96,7 @@ object PersistentActorStashingSpec {
val otherCommandHandler: Receive = unstashBehavior orElse {
case other ⇒ stash()
}
-
+
def unstashBehavior: Receive = {
case Cmd("c") ⇒
persist(Evt("c")) { evt ⇒
@@ -119,18 +120,19 @@ object PersistentActorStashingSpec {
class AsyncStashingPersistentActor(name: String) extends StashExamplePersistentActor(name) {
var stashed = false
-
+
val receiveCommand: Receive = commonBehavior orElse unstashBehavior orElse {
- case Cmd("a") ⇒ persistAsync(Evt("a"))(updateState)
- case Cmd("b") if !stashed ⇒ stash(); stashed = true
- case Cmd("b") ⇒ persistAsync(Evt("b"))(updateState)
+ case Cmd("a") ⇒ persistAsync(Evt("a"))(updateState)
+ case Cmd("b") if !stashed ⇒
+ stash(); stashed = true
+ case Cmd("b") ⇒ persistAsync(Evt("b"))(updateState)
}
override def unstashBehavior: Receive = {
case Cmd("c") ⇒ persistAsync(Evt("c"))(updateState); unstashAll()
}
}
-
+
class AsyncStashingWithinHandlerPersistentActor(name: String) extends AsyncStashingPersistentActor(name) {
override def unstashBehavior: Receive = {
case Cmd("c") ⇒ persistAsync(Evt("c")) { evt ⇒ updateState(evt); unstashAll() }
@@ -143,7 +145,7 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp
with ImplicitSender {
import PersistentActorStashingSpec._
- def stash[T <: NamedPersistentActor : ClassTag](): Unit = {
+ def stash[T <: NamedPersistentActor: ClassTag](): Unit = {
"support user stash operations" in {
val persistentActor = namedPersistentActor[T]
persistentActor ! Cmd("a")
@@ -155,7 +157,7 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp
}
}
- def stashWithSeveralMessages[T <: NamedPersistentActor : ClassTag](): Unit = {
+ def stashWithSeveralMessages[T <: NamedPersistentActor: ClassTag](): Unit = {
"support user stash operations with several stashed messages" in {
val persistentActor = namedPersistentActor[T]
val n = 10
@@ -168,7 +170,7 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp
}
}
- def stashUnderFailures[T <: NamedPersistentActor : ClassTag](): Unit = {
+ def stashUnderFailures[T <: NamedPersistentActor: ClassTag](): Unit = {
"support user stash operations under failures" in {
val persistentActor = namedPersistentActor[T]
val bs = 1 to 10 map ("b-" + _)
@@ -185,13 +187,13 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp
behave like stashWithSeveralMessages[UserStashManyPersistentActor]()
behave like stashUnderFailures[UserStashFailurePersistentActor]()
}
-
+
"Stashing(unstashAll called in handler) in a persistent actor" must {
behave like stash[UserStashWithinHandlerPersistentActor]()
behave like stashWithSeveralMessages[UserStashWithinHandlerManyPersistentActor]()
behave like stashUnderFailures[UserStashWithinHandlerFailureCallbackPersistentActor]()
}
-
+
}
class SteppingInMemPersistentActorStashingSpec extends PersistenceSpec(
@@ -199,7 +201,7 @@ class SteppingInMemPersistentActorStashingSpec extends PersistenceSpec(
with ImplicitSender {
import PersistentActorStashingSpec._
- def stash[T <: NamedPersistentActor : ClassTag](): Unit = {
+ def stash[T <: NamedPersistentActor: ClassTag](): Unit = {
"handle async callback not happening until next message has been stashed" in {
val persistentActor = namedPersistentActor[T]
awaitAssert(SteppingInmemJournal.getRef("persistence-stash"), 3.seconds)
@@ -231,7 +233,7 @@ class SteppingInMemPersistentActorStashingSpec extends PersistenceSpec(
"Stashing in a persistent actor mixed with persistAsync" must {
behave like stash[AsyncStashingPersistentActor]()
}
-
+
"Stashing(unstashAll called in handler) in a persistent actor mixed with persistAsync" must {
behave like stash[AsyncStashingWithinHandlerPersistentActor]()
}
diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala
index 6b2f70d299..0477f79b06 100644
--- a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala
+++ b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala
@@ -47,7 +47,7 @@ object SteppingInmemJournal {
def getRef(instanceId: String): ActorRef = synchronized(_current(instanceId))
private def putRef(instanceId: String, instance: ActorRef): Unit = synchronized {
- _current = _current + (instanceId -> instance)
+ _current = _current + (instanceId → instance)
}
private def remove(instanceId: String): Unit = synchronized(
_current -= instanceId)
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala
index 84dbf87593..1683bba4ba 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala
@@ -31,7 +31,7 @@ object RemoteQuarantinePiercingSpec extends MultiNodeConfig {
class Subject extends Actor {
def receive = {
case "shutdown" ⇒ context.system.terminate()
- case "identify" ⇒ sender() ! (AddressUidExtension(context.system).addressUid -> self)
+ case "identify" ⇒ sender() ! (AddressUidExtension(context.system).addressUid → self)
}
}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala
index b5aa2c8168..10837756f1 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala
@@ -46,7 +46,7 @@ object RemoteRestartedQuarantinedSpec extends MultiNodeConfig {
class Subject extends Actor {
def receive = {
case "shutdown" ⇒ context.system.terminate()
- case "identify" ⇒ sender() ! (AddressUidExtension(context.system).addressUid -> self)
+ case "identify" ⇒ sender() ! (AddressUidExtension(context.system).addressUid → self)
}
}
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala
index 0d9662d289..9270c85e25 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala
@@ -71,8 +71,8 @@ class RemoteRandomSpec extends MultiNodeSpec(RemoteRandomMultiJvmSpec)
val replies: Map[Address, Int] = (receiveWhile(5.seconds, messages = connectionCount * iterationCount) {
case ref: ActorRef ⇒ ref.path.address
- }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ }).foldLeft(Map(node(first).address → 0, node(second).address → 0, node(third).address → 0)) {
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
enterBarrier("broadcast-end")
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala
index 614be32426..c80789bd0c 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala
@@ -99,8 +99,8 @@ class RemoteRoundRobinSpec extends MultiNodeSpec(RemoteRoundRobinMultiJvmSpec)
val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) {
case ref: ActorRef ⇒ ref.path.address
- }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ }).foldLeft(Map(node(first).address → 0, node(second).address → 0, node(third).address → 0)) {
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
enterBarrier("broadcast-end")
@@ -184,8 +184,8 @@ class RemoteRoundRobinSpec extends MultiNodeSpec(RemoteRoundRobinMultiJvmSpec)
val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) {
case ref: ActorRef ⇒ ref.path.address
- }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ }).foldLeft(Map(node(first).address → 0, node(second).address → 0, node(third).address → 0)) {
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
enterBarrier("end")
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala
index cbad189ac5..6f0fdb4412 100644
--- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala
+++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala
@@ -74,8 +74,8 @@ class RemoteScatterGatherSpec extends MultiNodeSpec(RemoteScatterGatherMultiJvmS
val replies: Map[Address, Int] = (receiveWhile(5.seconds, messages = connectionCount * iterationCount) {
case ref: ActorRef ⇒ ref.path.address
- }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) {
- case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ }).foldLeft(Map(node(first).address → 0, node(second).address → 0, node(third).address → 0)) {
+ case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1))
}
enterBarrier("broadcast-end")
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala
index 4b547174e2..d00782f7d4 100644
--- a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala
+++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala
@@ -128,8 +128,8 @@ class LogRoleReplace {
line match {
case RoleStarted(jvm, role, host, port) ⇒
- replacements += (jvm -> role)
- replacements += ((host + ":" + port) -> role)
+ replacements += (jvm → role)
+ replacements += ((host + ":" + port) → role)
false
case _ ⇒ true
}
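For context: the replacements collected above are later applied to each log line, substituting JVM names and host:port pairs with role names. A sketch of that substitution step (assumed shape, not the actual LogRoleReplace code):

val replacements = Map("JVM-1" → "first", "localhost:2552" → "second")

// rewrite one line by applying every known replacement in turn
def replaceRoles(line: String): String =
  replacements.foldLeft(line) { case (acc, (from, to)) ⇒ acc.replace(from, to) }

replaceRoles("[JVM-1] connected to localhost:2552") // "[first] connected to second"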
diff --git a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala
index 75595812b2..e845a13b50 100644
--- a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala
+++ b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala
@@ -90,8 +90,8 @@ class ResendUnfulfillableException
final case class AckedSendBuffer[T <: HasSequenceNumber](
capacity: Int,
nonAcked: IndexedSeq[T] = Vector.empty[T],
- nacked: IndexedSeq[T] = Vector.empty[T],
- maxSeq: SeqNo = SeqNo(-1)) {
+ nacked: IndexedSeq[T] = Vector.empty[T],
+ maxSeq: SeqNo = SeqNo(-1)) {
/**
* Processes an incoming acknowledgement and returns a new buffer with only unacknowledged elements remaining.
@@ -137,9 +137,9 @@ final case class AckedSendBuffer[T <: HasSequenceNumber](
* @param buf Buffer of messages that are waiting for delivery
*/
final case class AckedReceiveBuffer[T <: HasSequenceNumber](
- lastDelivered: SeqNo = SeqNo(-1),
- cumulativeAck: SeqNo = SeqNo(-1),
- buf: SortedSet[T] = TreeSet.empty[T])(implicit val seqOrdering: Ordering[T]) {
+ lastDelivered: SeqNo = SeqNo(-1),
+ cumulativeAck: SeqNo = SeqNo(-1),
+ buf: SortedSet[T] = TreeSet.empty[T])(implicit val seqOrdering: Ordering[T]) {
import SeqNo.ord.max
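The reindented buffers implement acknowledged delivery over sequence numbers: a sent message stays buffered until a cumulative ack covers its sequence number, and anything still buffered is eligible for resend. A minimal sketch of that idea (simplified; the real AckedSendBuffer also tracks NACKs and enforces capacity):

final case class Msg(seq: Long)

var nonAcked = Vector(Msg(1), Msg(2), Msg(3)) // sent but not yet acknowledged

// a cumulative ack confirms every message up to and including that seq
def ack(cumulativeAck: Long): Unit =
  nonAcked = nonAcked.filter(_.seq > cumulativeAck)

ack(2) // nonAcked == Vector(Msg(3)), which would be resent after a reconnect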
diff --git a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala
index a04283440f..560533af27 100644
--- a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala
+++ b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala
@@ -28,8 +28,9 @@ import akka.util.Helpers.ConfigOps
*/
class DeadlineFailureDetector(
val acceptableHeartbeatPause: FiniteDuration,
- val heartbeatInterval: FiniteDuration)(
- implicit clock: Clock) extends FailureDetector {
+ val heartbeatInterval: FiniteDuration)(
+ implicit
+ clock: Clock) extends FailureDetector {
/**
* Constructor that reads parameters from config.
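The implicit Clock that moves onto its own line here exists so the deadline logic can be driven by a fake time source in tests. A minimal sketch of the deadline idea, not the real implementation:

// report unavailable once the allowed pause since the last heartbeat elapses
class DeadlineSketch(acceptablePauseMillis: Long)(clock: () ⇒ Long = () ⇒ System.currentTimeMillis()) {
  @volatile private var lastHeartbeat = clock()
  def heartbeat(): Unit = lastHeartbeat = clock()
  def isAvailable: Boolean = clock() - lastHeartbeat <= acceptablePauseMillis
}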
diff --git a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala
index 5d20087f30..20462a5c99 100644
--- a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala
+++ b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala
@@ -48,7 +48,7 @@ class DefaultFailureDetectorRegistry[A](detectorFactory: () ⇒ FailureDetector)
case None ⇒
val newDetector: FailureDetector = detectorFactory()
newDetector.heartbeat()
- resourceToFailureDetector.set(oldTable + (resource -> newDetector))
+ resourceToFailureDetector.set(oldTable + (resource → newDetector))
}
} finally failureDetectorCreationLock.unlock()
}
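The hunk above lazily creates one detector per monitored resource under a creation lock, then publishes the updated table through an AtomicReference. A usage sketch against the registry API shown here, with a trivial stand-in detector:

import akka.remote.{ DefaultFailureDetectorRegistry, FailureDetector }

// trivial stand-in detector, for illustration only
def newDetector(): FailureDetector = new FailureDetector {
  @volatile private var beats = 0
  def heartbeat(): Unit = beats += 1
  def isMonitoring: Boolean = beats > 0
  def isAvailable: Boolean = true
}

val registry = new DefaultFailureDetectorRegistry[String](() ⇒ newDetector())
registry.heartbeat("akka://sys@host:2552")   // first beat creates the detector
registry.isAvailable("akka://sys@host:2552") // true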
diff --git a/akka-remote/src/main/scala/akka/remote/Endpoint.scala b/akka-remote/src/main/scala/akka/remote/Endpoint.scala
index 48f06d2513..c92f1b1032 100644
--- a/akka-remote/src/main/scala/akka/remote/Endpoint.scala
+++ b/akka-remote/src/main/scala/akka/remote/Endpoint.scala
@@ -32,25 +32,28 @@ import scala.concurrent.Future
* INTERNAL API
*/
private[remote] trait InboundMessageDispatcher {
- def dispatch(recipient: InternalActorRef,
- recipientAddress: Address,
- serializedMessage: SerializedMessage,
- senderOption: Option[ActorRef]): Unit
+ def dispatch(
+ recipient: InternalActorRef,
+ recipientAddress: Address,
+ serializedMessage: SerializedMessage,
+ senderOption: Option[ActorRef]): Unit
}
/**
* INTERNAL API
*/
-private[remote] class DefaultMessageDispatcher(private val system: ExtendedActorSystem,
- private val provider: RemoteActorRefProvider,
- private val log: LoggingAdapter) extends InboundMessageDispatcher {
+private[remote] class DefaultMessageDispatcher(
+ private val system: ExtendedActorSystem,
+ private val provider: RemoteActorRefProvider,
+ private val log: LoggingAdapter) extends InboundMessageDispatcher {
private val remoteDaemon = provider.remoteDaemon
- override def dispatch(recipient: InternalActorRef,
- recipientAddress: Address,
- serializedMessage: SerializedMessage,
- senderOption: Option[ActorRef]): Unit = {
+ override def dispatch(
+ recipient: InternalActorRef,
+ recipientAddress: Address,
+ serializedMessage: SerializedMessage,
+ senderOption: Option[ActorRef]): Unit = {
import provider.remoteSettings._
@@ -76,8 +79,9 @@ private[remote] class DefaultMessageDispatcher(private val system: ExtendedActor
case sel: ActorSelectionMessage ⇒
if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) ||
sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian))
- log.debug("operating in UntrustedMode, dropping inbound actor selection to [{}], " +
- "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration",
+ log.debug(
+ "operating in UntrustedMode, dropping inbound actor selection to [{}], " +
+ "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration",
sel.elements.mkString("/", "/", ""))
else
// run the receive logic for ActorSelectionMessage here to make sure it is not stuck on busy user actor
@@ -94,10 +98,12 @@ private[remote] class DefaultMessageDispatcher(private val system: ExtendedActor
// if it was originally addressed to us but is in fact remote from our point of view (i.e. remote-deployed)
r.!(payload)(sender)
else
- log.error("dropping message [{}] for non-local recipient [{}] arriving at [{}] inbound addresses are [{}]",
+ log.error(
+ "dropping message [{}] for non-local recipient [{}] arriving at [{}] inbound addresses are [{}]",
payloadClass, r, recipientAddress, provider.transport.addresses.mkString(", "))
- case r ⇒ log.error("dropping message [{}] for unknown recipient [{}] arriving at [{}] inbound addresses are [{}]",
+ case r ⇒ log.error(
+ "dropping message [{}] for unknown recipient [{}] arriving at [{}] inbound addresses are [{}]",
payloadClass, r, recipientAddress, provider.transport.addresses.mkString(", "))
}
@@ -129,10 +135,11 @@ private[remote] final case class ShutDownAssociation(localAddress: Address, remo
* INTERNAL API
*/
@SerialVersionUID(2L)
-private[remote] final case class InvalidAssociation(localAddress: Address,
- remoteAddress: Address,
- cause: Throwable,
- disassociationInfo: Option[DisassociateInfo] = None)
+private[remote] final case class InvalidAssociation(
+ localAddress: Address,
+ remoteAddress: Address,
+ cause: Throwable,
+ disassociationInfo: Option[DisassociateInfo] = None)
extends EndpointException("Invalid address: " + remoteAddress, cause) with AssociationProblem
/**
@@ -174,12 +181,12 @@ private[remote] object ReliableDeliverySupervisor {
def props(
handleOrActive: Option[AkkaProtocolHandle],
- localAddress: Address,
- remoteAddress: Address,
- refuseUid: Option[Int],
- transport: AkkaProtocolTransport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
+ localAddress: Address,
+ remoteAddress: Address,
+ refuseUid: Option[Int],
+ transport: AkkaProtocolTransport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
Props(classOf[ReliableDeliverySupervisor], handleOrActive, localAddress, remoteAddress, refuseUid, transport, settings,
codec, receiveBuffers)
@@ -189,13 +196,13 @@ private[remote] object ReliableDeliverySupervisor {
* INTERNAL API
*/
private[remote] class ReliableDeliverySupervisor(
- handleOrActive: Option[AkkaProtocolHandle],
- val localAddress: Address,
- val remoteAddress: Address,
- val refuseUid: Option[Int],
- val transport: AkkaProtocolTransport,
- val settings: RemoteSettings,
- val codec: AkkaPduCodec,
+ handleOrActive: Option[AkkaProtocolHandle],
+ val localAddress: Address,
+ val remoteAddress: Address,
+ val refuseUid: Option[Int],
+ val transport: AkkaProtocolTransport,
+ val settings: RemoteSettings,
+ val codec: AkkaPduCodec,
val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends Actor with ActorLogging {
import ReliableDeliverySupervisor._
import context.dispatcher
@@ -209,7 +216,8 @@ private[remote] class ReliableDeliverySupervisor(
case e @ (_: AssociationProblem) ⇒ Escalate
case NonFatal(e) ⇒
val causedBy = if (e.getCause == null) "" else s"Caused by: [${e.getCause.getMessage}]"
- log.warning("Association with remote system [{}] has failed, address is now gated for [{}] ms. Reason: [{}] {}",
+ log.warning(
+ "Association with remote system [{}] has failed, address is now gated for [{}] ms. Reason: [{}] {}",
remoteAddress, settings.RetryGateClosedFor.toMillis, e.getMessage, causedBy)
uidConfirmed = false // Need confirmation of UID again
if (bufferWasInUse) {
@@ -437,11 +445,11 @@ private[remote] class ReliableDeliverySupervisor(
* INTERNAL API
*/
private[remote] abstract class EndpointActor(
- val localAddress: Address,
+ val localAddress: Address,
val remoteAddress: Address,
- val transport: Transport,
- val settings: RemoteSettings,
- val codec: AkkaPduCodec) extends Actor with ActorLogging {
+ val transport: Transport,
+ val settings: RemoteSettings,
+ val codec: AkkaPduCodec) extends Actor with ActorLogging {
def inbound: Boolean
@@ -463,14 +471,14 @@ private[remote] abstract class EndpointActor(
private[remote] object EndpointWriter {
def props(
- handleOrActive: Option[AkkaProtocolHandle],
- localAddress: Address,
- remoteAddress: Address,
- refuseUid: Option[Int],
- transport: AkkaProtocolTransport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- receiveBuffers: ConcurrentHashMap[Link, ResendState],
+ handleOrActive: Option[AkkaProtocolHandle],
+ localAddress: Address,
+ remoteAddress: Address,
+ refuseUid: Option[Int],
+ transport: AkkaProtocolTransport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ receiveBuffers: ConcurrentHashMap[Link, ResendState],
reliableDeliverySupervisor: Option[ActorRef]): Props =
Props(classOf[EndpointWriter], handleOrActive, localAddress, remoteAddress, refuseUid, transport, settings, codec,
receiveBuffers, reliableDeliverySupervisor)
@@ -508,14 +516,14 @@ private[remote] object EndpointWriter {
* INTERNAL API
*/
private[remote] class EndpointWriter(
- handleOrActive: Option[AkkaProtocolHandle],
- localAddress: Address,
- remoteAddress: Address,
- refuseUid: Option[Int],
- transport: AkkaProtocolTransport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- val receiveBuffers: ConcurrentHashMap[Link, ResendState],
+ handleOrActive: Option[AkkaProtocolHandle],
+ localAddress: Address,
+ remoteAddress: Address,
+ refuseUid: Option[Int],
+ transport: AkkaProtocolTransport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ val receiveBuffers: ConcurrentHashMap[Link, ResendState],
val reliableDeliverySupervisor: Option[ActorRef])
extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {
@@ -704,8 +712,9 @@ private[remote] class EndpointWriter(
if (size > settings.LogBufferSizeExceeding) {
val now = System.nanoTime()
if (now - largeBufferLogTimestamp >= LogBufferSizeInterval) {
- log.warning("[{}] buffered messages in EndpointWriter for [{}]. " +
- "You should probably implement flow control to avoid flooding the remote connection.",
+ log.warning(
+ "[{}] buffered messages in EndpointWriter for [{}]. " +
+ "You should probably implement flow control to avoid flooding the remote connection.",
size, remoteAddress)
largeBufferLogTimestamp = now
}
@@ -888,16 +897,16 @@ private[remote] class EndpointWriter(
private[remote] object EndpointReader {
def props(
- localAddress: Address,
- remoteAddress: Address,
- transport: Transport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- msgDispatch: InboundMessageDispatcher,
- inbound: Boolean,
- uid: Int,
+ localAddress: Address,
+ remoteAddress: Address,
+ transport: Transport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ msgDispatch: InboundMessageDispatcher,
+ inbound: Boolean,
+ uid: Int,
reliableDeliverySupervisor: Option[ActorRef],
- receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
+ receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
Props(classOf[EndpointReader], localAddress, remoteAddress, transport, settings, codec, msgDispatch, inbound,
uid, reliableDeliverySupervisor, receiveBuffers)
@@ -907,16 +916,16 @@ private[remote] object EndpointReader {
* INTERNAL API
*/
private[remote] class EndpointReader(
- localAddress: Address,
- remoteAddress: Address,
- transport: Transport,
- settings: RemoteSettings,
- codec: AkkaPduCodec,
- msgDispatch: InboundMessageDispatcher,
- val inbound: Boolean,
- val uid: Int,
+ localAddress: Address,
+ remoteAddress: Address,
+ transport: Transport,
+ settings: RemoteSettings,
+ codec: AkkaPduCodec,
+ msgDispatch: InboundMessageDispatcher,
+ val inbound: Boolean,
+ val uid: Int,
val reliableDeliverySupervisor: Option[ActorRef],
- val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {
+ val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {
import EndpointWriter.{ OutboundAck, StopReading, StoppedReading }
@@ -972,8 +981,9 @@ private[remote] class EndpointReader(
}
case InboundPayload(oversized) ⇒
- log.error(new OversizedPayloadException(s"Discarding oversized payload received: " +
- s"max allowed size [${transport.maximumPayloadBytes}] bytes, actual size [${oversized.size}] bytes."),
+ log.error(
+ new OversizedPayloadException(s"Discarding oversized payload received: " +
+ s"max allowed size [${transport.maximumPayloadBytes}] bytes, actual size [${oversized.size}] bytes."),
"Transient error while reading from association (association remains live)")
case StopReading(writer, replyTo) ⇒
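On the UntrustedMode hunk above: an inbound actor selection is dropped unless its full path string is whitelisted in akka.remote.trusted-selection-paths. A sketch of the path check:

val trustedSelectionPaths = Set("/user/receptionist") // would come from configuration

def allowed(elements: Seq[String]): Boolean =
  trustedSelectionPaths.contains(elements.mkString("/", "/", ""))

allowed(Seq("user", "receptionist")) // true
allowed(Seq("user", "other"))        // false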
diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
index edc7cdfd82..49fd506f03 100644
--- a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
+++ b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
@@ -67,11 +67,11 @@ private[akka] object FailureDetectorLoader {
def load(fqcn: String, config: Config, system: ActorSystem): FailureDetector = {
system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[FailureDetector](
fqcn, List(
- classOf[Config] -> config,
- classOf[EventStream] -> system.eventStream)).recover({
- case e ⇒ throw new ConfigurationException(
- s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e)
- }).get
+ classOf[Config] → config,
+ classOf[EventStream] → system.eventStream)).recover({
+ case e ⇒ throw new ConfigurationException(
+ s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e)
+ }).get
}
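FailureDetectorLoader.load reflectively invokes a (Config, EventStream) constructor, so a pluggable detector must have exactly that shape. A sketch (the body is a placeholder, not a real detection strategy):

import akka.event.EventStream
import akka.remote.FailureDetector
import com.typesafe.config.Config

class MyFailureDetector(config: Config, ev: EventStream) extends FailureDetector {
  def isAvailable: Boolean = true
  def isMonitoring: Boolean = false
  def heartbeat(): Unit = ()
}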
/**
diff --git a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
index 2bff7f13c7..8f208d3309 100644
--- a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
+++ b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
@@ -54,12 +54,13 @@ import akka.util.Helpers.ConfigOps
* purposes. It is only used for measuring intervals (duration).
*/
class PhiAccrualFailureDetector(
- val threshold: Double,
- val maxSampleSize: Int,
- val minStdDeviation: FiniteDuration,
+ val threshold: Double,
+ val maxSampleSize: Int,
+ val minStdDeviation: FiniteDuration,
val acceptableHeartbeatPause: FiniteDuration,
- val firstHeartbeatEstimate: FiniteDuration)(
- implicit clock: Clock) extends FailureDetector {
+ val firstHeartbeatEstimate: FiniteDuration)(
+ implicit
+ clock: Clock) extends FailureDetector {
/**
* Constructor that reads parameters from config.
@@ -203,9 +204,9 @@ private[akka] object HeartbeatHistory {
* for empty HeartbeatHistory, i.e. throws ArithmeticException.
*/
private[akka] final case class HeartbeatHistory private (
- maxSampleSize: Int,
- intervals: immutable.IndexedSeq[Long],
- intervalSum: Long,
+ maxSampleSize: Int,
+ intervals: immutable.IndexedSeq[Long],
+ intervalSum: Long,
squaredIntervalSum: Long) {
// Heartbeat histories are created through the firstHeartbeat variable of the PhiAccrualFailureDetector
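For reference, the phi value the detector is named for is -log10(1 - F(timeSinceLastHeartbeat)), where F is a normal CDF fitted to the history of heartbeat intervals kept here. A sketch following the logistic approximation of F that the detector uses:

def phi(timeDiffMillis: Double, mean: Double, stdDev: Double): Double = {
  val y = (timeDiffMillis - mean) / stdDev
  val e = math.exp(-y * (1.5976 + 0.070566 * y * y)) // logistic approximation of 1 - F
  if (timeDiffMillis > mean) -math.log10(e / (1.0 + e))
  else -math.log10(1.0 - 1.0 / (1.0 + e))
}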
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
index 7546dc7ee1..4ad398f696 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
@@ -79,9 +79,10 @@ private[akka] object RemoteActorRefProvider {
* and handled as dead letters to the original (remote) destination. Without this special case, DeathWatch related
* functionality breaks, like the special handling of Watch messages arriving to dead letters.
*/
- private class RemoteDeadLetterActorRef(_provider: ActorRefProvider,
- _path: ActorPath,
- _eventStream: EventStream) extends DeadLetterActorRef(_provider, _path, _eventStream) {
+ private class RemoteDeadLetterActorRef(
+ _provider: ActorRefProvider,
+ _path: ActorPath,
+ _eventStream: EventStream) extends DeadLetterActorRef(_provider, _path, _eventStream) {
import EndpointManager.Send
override def !(message: Any)(implicit sender: ActorRef): Unit = message match {
@@ -109,9 +110,9 @@ private[akka] object RemoteActorRefProvider {
*
*/
private[akka] class RemoteActorRefProvider(
- val systemName: String,
- val settings: ActorSystem.Settings,
- val eventStream: EventStream,
+ val systemName: String,
+ val settings: ActorSystem.Settings,
+ val eventStream: EventStream,
val dynamicAccess: DynamicAccess) extends ActorRefProvider {
import RemoteActorRefProvider._
@@ -168,16 +169,16 @@ private[akka] class RemoteActorRefProvider(
val internals = Internals(
remoteDaemon = {
- val d = new RemoteSystemDaemon(
- system,
- local.rootPath / "remote",
- rootGuardian,
- remotingTerminator,
- log,
- untrustedMode = remoteSettings.UntrustedMode)
- local.registerExtraNames(Map(("remote", d)))
- d
- },
+ val d = new RemoteSystemDaemon(
+ system,
+ local.rootPath / "remote",
+ rootGuardian,
+ remotingTerminator,
+ log,
+ untrustedMode = remoteSettings.UntrustedMode)
+ local.registerExtraNames(Map(("remote", d)))
+ d
+ },
serialization = SerializationExtension(system),
transport = new Remoting(system, this))
@@ -440,12 +441,12 @@ private[akka] trait RemoteRef extends ActorRefScope {
* This reference is network-aware (remembers its origin) and immutable.
*/
private[akka] class RemoteActorRef private[akka] (
- remote: RemoteTransport,
+ remote: RemoteTransport,
val localAddressToUse: Address,
- val path: ActorPath,
- val getParent: InternalActorRef,
- props: Option[Props],
- deploy: Option[Deploy])
+ val path: ActorPath,
+ val getParent: InternalActorRef,
+ props: Option[Props],
+ deploy: Option[Deploy])
extends InternalActorRef with RemoteRef {
def getChild(name: Iterator[String]): InternalActorRef = {
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
index b1b744fec1..618b71aa3e 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
@@ -42,11 +42,11 @@ private[akka] final case class DaemonMsgCreate(props: Props, deploy: Deploy, pat
* It acts as the brain of the remote that responds to system remote events (messages) and undertakes action.
*/
private[akka] class RemoteSystemDaemon(
- system: ActorSystemImpl,
- _path: ActorPath,
- _parent: InternalActorRef,
- terminator: ActorRef,
- _log: LoggingAdapter,
+ system: ActorSystemImpl,
+ _path: ActorPath,
+ _parent: InternalActorRef,
+ terminator: ActorRef,
+ _log: LoggingAdapter,
val untrustedMode: Boolean)
extends VirtualPathContainer(system.provider, _path, _parent, _log) {
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala
index 2c8d248d2b..cbcc36e28e 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala
@@ -30,7 +30,7 @@ private[akka] class RemoteDeploymentWatcher extends Actor with RequiresMessageQu
def receive = {
case WatchRemote(a, supervisor: InternalActorRef) ⇒
- supervisors += (a -> supervisor)
+ supervisors += (a → supervisor)
context.watch(a)
case t @ Terminated(a) if supervisors isDefinedAt a ⇒
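The hunk above records which supervisor cares about each remote-deployed actor before watching it, so the Terminated case can route the failure. A generic sketch of the watch-and-forward pattern (the real watcher emits a system-level failure notification rather than forwarding Terminated):

import akka.actor.{ Actor, ActorRef, Terminated }

class WatcherSketch extends Actor {
  private var supervisors = Map.empty[ActorRef, ActorRef]
  def receive = {
    case (a: ActorRef, sup: ActorRef) ⇒
      supervisors += (a → sup) // remember who to notify
      context.watch(a)
    case t @ Terminated(a) if supervisors.isDefinedAt(a) ⇒
      supervisors(a) ! t // route the termination to the interested supervisor
  }
}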
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
index e35aee092a..33a5a01d34 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
@@ -94,7 +94,8 @@ final class RemoteSettings(val config: Config) {
} requiring (_ > Duration.Zero, "quarantine-after-silence must be > 0")
val QuarantineDuration: FiniteDuration = {
- config.getMillisDuration("akka.remote.prune-quarantine-marker-after").requiring(_ > Duration.Zero,
+ config.getMillisDuration("akka.remote.prune-quarantine-marker-after").requiring(
+ _ > Duration.Zero,
"prune-quarantine-marker-after must be > 0 ms")
}
@@ -116,7 +117,8 @@ final class RemoteSettings(val config: Config) {
val Transports: immutable.Seq[(String, immutable.Seq[String], Config)] = transportNames.map { name ⇒
val transportConfig = transportConfigFor(name)
- (transportConfig.getString("transport-class"),
+ (
+ transportConfig.getString("transport-class"),
immutableSeq(transportConfig.getStringList("applied-adapters")).reverse,
transportConfig)
}
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
index 4fc7e0cb44..1da2c8c797 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
@@ -21,9 +21,9 @@ private[akka] object RemoteWatcher {
* Factory method for `RemoteWatcher` [[akka.actor.Props]].
*/
def props(
- failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
heartbeatExpectedResponseAfter: FiniteDuration): Props =
Props(classOf[RemoteWatcher], failureDetector, heartbeatInterval, unreachableReaperInterval,
heartbeatExpectedResponseAfter).withDeploy(Deploy.local)
@@ -44,8 +44,9 @@ private[akka] object RemoteWatcher {
lazy val empty: Stats = counts(0, 0)
def counts(watching: Int, watchingNodes: Int): Stats = Stats(watching, watchingNodes)(Set.empty, Set.empty)
}
- final case class Stats(watching: Int, watchingNodes: Int)(val watchingRefs: Set[(ActorRef, ActorRef)],
- val watchingAddresses: Set[Address]) {
+ final case class Stats(watching: Int, watchingNodes: Int)(
+ val watchingRefs: Set[(ActorRef, ActorRef)],
+ val watchingAddresses: Set[Address]) {
override def toString: String = {
def formatWatchingRefs: String =
watchingRefs.map(x ⇒ x._2.path.name + " -> " + x._1.path.name).mkString("[", ", ", "]")
@@ -78,9 +79,9 @@ private[akka] object RemoteWatcher {
*
*/
private[akka] class RemoteWatcher(
- failureDetector: FailureDetectorRegistry[Address],
- heartbeatInterval: FiniteDuration,
- unreachableReaperInterval: FiniteDuration,
+ failureDetector: FailureDetectorRegistry[Address],
+ heartbeatInterval: FiniteDuration,
+ unreachableReaperInterval: FiniteDuration,
heartbeatExpectedResponseAfter: FiniteDuration)
extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
diff --git a/akka-remote/src/main/scala/akka/remote/Remoting.scala b/akka-remote/src/main/scala/akka/remote/Remoting.scala
index 4a611b6320..5b54b33148 100644
--- a/akka-remote/src/main/scala/akka/remote/Remoting.scala
+++ b/akka-remote/src/main/scala/akka/remote/Remoting.scala
@@ -178,13 +178,14 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc
val addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]] = Promise()
manager ! Listen(addressesPromise)
- val transports: Seq[(AkkaProtocolTransport, Address)] = Await.result(addressesPromise.future,
+ val transports: Seq[(AkkaProtocolTransport, Address)] = Await.result(
+ addressesPromise.future,
StartupTimeout.duration)
if (transports.isEmpty) throw new RemoteTransportException("No transport drivers were loaded.", null)
transportMapping = transports.groupBy {
case (transport, _) ⇒ transport.schemeIdentifier
- } map { case (k, v) ⇒ k -> v.toSet }
+ } map { case (k, v) ⇒ k → v.toSet }
defaultAddress = transports.head._2
addresses = transports.map { _._2 }.toSet
@@ -233,7 +234,7 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc
private[akka] def boundAddresses: Map[String, Set[Address]] = {
transportMapping.map {
case (scheme, transports) ⇒
- scheme -> transports.flatMap {
+ scheme → transports.flatMap {
// Need to do it like this for binary compatibility reasons
case (t, _) ⇒ Option(t.boundAddress)
}
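boundAddresses collapses the (transport, address) pairs into a scheme → Set[Address] view. The same grouping on hypothetical data:

case class Bound(scheme: String, port: Int)

val bound = Seq(Bound("tcp", 2552), Bound("tcp", 2553), Bound("ssl", 2554))

val byScheme: Map[String, Set[Bound]] =
  bound.groupBy(_.scheme).map { case (k, v) ⇒ k → v.toSet }
// Map("tcp" → Set(Bound("tcp", 2552), Bound("tcp", 2553)), "ssl" → Set(Bound("ssl", 2554)))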
@@ -265,8 +266,9 @@ private[remote] object EndpointManager {
// Messages internal to EndpointManager
case object Prune extends NoSerializationVerificationNeeded
- final case class ListensResult(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]],
- results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])])
+ final case class ListensResult(
+ addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]],
+ results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])])
extends NoSerializationVerificationNeeded
final case class ListensFailure(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]], cause: Throwable)
extends NoSerializationVerificationNeeded
@@ -308,30 +310,30 @@ private[remote] object EndpointManager {
case Some(Pass(e, _, _)) ⇒
throw new IllegalArgumentException(s"Attempting to overwrite existing endpoint [$e] with [$endpoint]")
case _ ⇒
- addressToWritable += address -> Pass(endpoint, uid, refuseUid)
- writableToAddress += endpoint -> address
+ addressToWritable += address → Pass(endpoint, uid, refuseUid)
+ writableToAddress += endpoint → address
endpoint
}
def registerWritableEndpointUid(remoteAddress: Address, uid: Int): Unit = {
addressToWritable.get(remoteAddress) match {
- case Some(Pass(ep, _, refuseUid)) ⇒ addressToWritable += remoteAddress -> Pass(ep, Some(uid), refuseUid)
+ case Some(Pass(ep, _, refuseUid)) ⇒ addressToWritable += remoteAddress → Pass(ep, Some(uid), refuseUid)
case other ⇒
}
}
def registerWritableEndpointRefuseUid(remoteAddress: Address, refuseUid: Int): Unit = {
addressToWritable.get(remoteAddress) match {
- case Some(Pass(ep, uid, _)) ⇒ addressToWritable += remoteAddress -> Pass(ep, uid, Some(refuseUid))
- case Some(g: Gated) ⇒ addressToWritable += remoteAddress -> g.copy(refuseUid = Some(refuseUid))
- case Some(w: WasGated) ⇒ addressToWritable += remoteAddress -> w.copy(refuseUid = Some(refuseUid))
+ case Some(Pass(ep, uid, _)) ⇒ addressToWritable += remoteAddress → Pass(ep, uid, Some(refuseUid))
+ case Some(g: Gated) ⇒ addressToWritable += remoteAddress → g.copy(refuseUid = Some(refuseUid))
+ case Some(w: WasGated) ⇒ addressToWritable += remoteAddress → w.copy(refuseUid = Some(refuseUid))
case other ⇒
}
}
def registerReadOnlyEndpoint(address: Address, endpoint: ActorRef, uid: Int): ActorRef = {
- addressToReadonly += address -> ((endpoint, uid))
- readonlyToAddress += endpoint -> address
+ addressToReadonly += address → ((endpoint, uid))
+ readonlyToAddress += endpoint → address
endpoint
}
@@ -390,14 +392,14 @@ private[remote] object EndpointManager {
addressToWritable.get(address) match {
case Some(Quarantined(_, _)) ⇒ // don't overwrite Quarantined with Gated
case Some(Pass(_, _, refuseUid)) ⇒
- addressToWritable += address -> Gated(timeOfRelease, refuseUid)
+ addressToWritable += address → Gated(timeOfRelease, refuseUid)
writableToAddress -= endpoint
case Some(WasGated(refuseUid)) ⇒
- addressToWritable += address -> Gated(timeOfRelease, refuseUid)
+ addressToWritable += address → Gated(timeOfRelease, refuseUid)
writableToAddress -= endpoint
case Some(Gated(_, _)) ⇒ // already gated
case None ⇒
- addressToWritable += address -> Gated(timeOfRelease, refuseUid = None)
+ addressToWritable += address → Gated(timeOfRelease, refuseUid = None)
writableToAddress -= endpoint
}
} else if (isReadOnly(endpoint)) {
@@ -406,7 +408,7 @@ private[remote] object EndpointManager {
}
def markAsQuarantined(address: Address, uid: Int, timeOfRelease: Deadline): Unit =
- addressToWritable += address -> Quarantined(uid, timeOfRelease)
+ addressToWritable += address → Quarantined(uid, timeOfRelease)
def removePolicy(address: Address): Unit =
addressToWritable -= address
@@ -417,7 +419,7 @@ private[remote] object EndpointManager {
addressToWritable = addressToWritable.collect {
case entry @ (key, Gated(timeOfRelease, refuseUid)) ⇒
if (timeOfRelease.hasTimeLeft) entry
- else (key -> WasGated(refuseUid))
+ else (key → WasGated(refuseUid))
case entry @ (_, Quarantined(_, timeOfRelease)) if timeOfRelease.hasTimeLeft ⇒
// Quarantined removed when no time left
entry
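These hunks all manipulate the endpoint policy table: every remote address maps to a write policy, and Quarantined is never downgraded to Gated. A simplified sketch of the transitions (deadlines and endpoint refs elided):

sealed trait Policy
final case class Pass(uid: Option[Int], refuseUid: Option[Int]) extends Policy
final case class Gated(refuseUid: Option[Int]) extends Policy
final case class WasGated(refuseUid: Option[Int]) extends Policy
final case class Quarantined(uid: Int) extends Policy

var table = Map.empty[String, Policy]

def markAsFailed(address: String): Unit = table.get(address) match {
  case Some(Quarantined(_))   ⇒ // never overwrite Quarantined with Gated
  case Some(Pass(_, refuse))  ⇒ table += address → Gated(refuse)
  case Some(WasGated(refuse)) ⇒ table += address → Gated(refuse)
  case _                      ⇒ table += address → Gated(None)
}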
@@ -475,9 +477,10 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
case e @ InvalidAssociation(localAddress, remoteAddress, reason, disassiciationInfo) ⇒
keepQuarantinedOr(remoteAddress) {
val causedBy = if (reason.getCause == null) "" else s"Caused by: [${reason.getCause.getMessage}]"
- log.warning("Tried to associate with unreachable remote address [{}]. " +
- "Address is now gated for {} ms, all messages to this address will be delivered to dead letters. " +
- "Reason: [{}] {}",
+ log.warning(
+ "Tried to associate with unreachable remote address [{}]. " +
+ "Address is now gated for {} ms, all messages to this address will be delivered to dead letters. " +
+ "Reason: [{}] {}",
remoteAddress, settings.RetryGateClosedFor.toMillis, reason.getMessage, causedBy)
endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor)
}
@@ -490,8 +493,9 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
case ShutDownAssociation(localAddress, remoteAddress, _) ⇒
keepQuarantinedOr(remoteAddress) {
- log.debug("Remote system with address [{}] has shut down. " +
- "Address is now gated for {} ms, all messages to this address will be delivered to dead letters.",
+ log.debug(
+ "Remote system with address [{}] has shut down. " +
+ "Address is now gated for {} ms, all messages to this address will be delivered to dead letters.",
remoteAddress, settings.RetryGateClosedFor.toMillis)
endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor)
}
@@ -510,8 +514,9 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
case HopelessAssociation(localAddress, remoteAddress, None, _) ⇒
keepQuarantinedOr(remoteAddress) {
- log.warning("Association to [{}] with unknown UID is irrecoverably failed. " +
- "Address cannot be quarantined without knowing the UID, gating instead for {} ms.",
+ log.warning(
+ "Association to [{}] with unknown UID is irrecoverably failed. " +
+ "Address cannot be quarantined without knowing the UID, gating instead for {} ms.",
remoteAddress, settings.RetryGateClosedFor.toMillis)
endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor)
}
@@ -540,13 +545,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
} map {
case (a, t) if t.size > 1 ⇒
throw new RemoteTransportException(s"There are more than one transports listening on local address [$a]", null)
- case (a, t) ⇒ a -> t.head._1
+ case (a, t) ⇒ a → t.head._1
}
// Register to each transport as listener and collect mapping to addresses
val transportsAndAddresses = results map {
case (transport, address, promise) ⇒
promise.success(ActorAssociationEventListener(self))
- transport -> address
+ transport → address
}
addressesPromise.success(transportsAndAddresses)
case ListensFailure(addressesPromise, cause) ⇒
@@ -574,8 +579,9 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
(endpoints.writableEndpointWithPolicyFor(address), uidToQuarantineOption) match {
case (Some(Pass(endpoint, _, _)), None) ⇒
context.stop(endpoint)
- log.warning("Association to [{}] with unknown UID is reported as quarantined, but " +
- "address cannot be quarantined without knowing the UID, gating instead for {} ms.",
+ log.warning(
+ "Association to [{}] with unknown UID is reported as quarantined, but " +
+ "address cannot be quarantined without knowing the UID, gating instead for {} ms.",
address, settings.RetryGateClosedFor.toMillis)
endpoints.markAsFailed(endpoint, Deadline.now + settings.RetryGateClosedFor)
case (Some(Pass(endpoint, uidOption, refuseUidOption)), Some(quarantineUid)) ⇒
@@ -628,7 +634,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
// Stop all matching stashed connections
stashedInbound = stashedInbound.map {
case (writer, associations) ⇒
- writer -> associations.filter { assoc ⇒
+ writer → associations.filter { assoc ⇒
val handle = assoc.association.asInstanceOf[AkkaProtocolHandle]
val drop = matchesQuarantine(handle)
if (drop) handle.disassociate()
@@ -734,7 +740,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
case ia @ InboundAssociation(handle: AkkaProtocolHandle) ⇒ endpoints.readOnlyEndpointFor(handle.remoteAddress) match {
case Some((endpoint, _)) ⇒
pendingReadHandoffs.get(endpoint) foreach (_.disassociate())
- pendingReadHandoffs += endpoint -> handle
+ pendingReadHandoffs += endpoint → handle
endpoint ! EndpointWriter.TakeOver(handle, self)
endpoints.writableEndpointWithPolicyFor(handle.remoteAddress) match {
case Some(Pass(ep, _, _)) ⇒ ep ! ReliableDeliverySupervisor.Ungate
@@ -749,13 +755,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
// to get an unstash event
if (!writerIsIdle) {
ep ! ReliableDeliverySupervisor.IsIdle
- stashedInbound += ep -> (stashedInbound.getOrElse(ep, Vector.empty) :+ ia)
+ stashedInbound += ep → (stashedInbound.getOrElse(ep, Vector.empty) :+ ia)
} else
createAndRegisterEndpoint(handle, refuseUid = endpoints.refuseUid(handle.remoteAddress))
case Some(Pass(ep, Some(uid), _)) ⇒
if (handle.handshakeInfo.uid == uid) {
pendingReadHandoffs.get(ep) foreach (_.disassociate())
- pendingReadHandoffs += ep -> handle
+ pendingReadHandoffs += ep → handle
ep ! EndpointWriter.StopReading(ep, self)
ep ! ReliableDeliverySupervisor.Ungate
} else {
@@ -800,7 +806,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
*/
val transports: Seq[AkkaProtocolTransport] = for ((fqn, adapters, config) ← settings.Transports) yield {
- val args = Seq(classOf[ExtendedActorSystem] -> context.system, classOf[Config] -> config)
+ val args = Seq(classOf[ExtendedActorSystem] → context.system, classOf[Config] → config)
// Loads the driver -- the bottom element of the chain.
// The chain at this point:
@@ -859,37 +865,40 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
pendingReadHandoffs -= takingOverFrom
}
- private def createEndpoint(remoteAddress: Address,
- localAddress: Address,
- transport: AkkaProtocolTransport,
- endpointSettings: RemoteSettings,
- handleOption: Option[AkkaProtocolHandle],
- writing: Boolean,
- refuseUid: Option[Int]): ActorRef = {
+ private def createEndpoint(
+ remoteAddress: Address,
+ localAddress: Address,
+ transport: AkkaProtocolTransport,
+ endpointSettings: RemoteSettings,
+ handleOption: Option[AkkaProtocolHandle],
+ writing: Boolean,
+ refuseUid: Option[Int]): ActorRef = {
require(transportMapping contains localAddress, "Transport mapping is not defined for the address")
// refuseUid is ignored for read-only endpoints since the UID of the remote system is already known and has passed
// quarantine checks
- if (writing) context.watch(context.actorOf(RARP(extendedSystem).configureDispatcher(ReliableDeliverySupervisor.props(
- handleOption,
- localAddress,
- remoteAddress,
- refuseUid,
- transport,
- endpointSettings,
- AkkaPduProtobufCodec,
- receiveBuffers)).withDeploy(Deploy.local),
+ if (writing) context.watch(context.actorOf(
+ RARP(extendedSystem).configureDispatcher(ReliableDeliverySupervisor.props(
+ handleOption,
+ localAddress,
+ remoteAddress,
+ refuseUid,
+ transport,
+ endpointSettings,
+ AkkaPduProtobufCodec,
+ receiveBuffers)).withDeploy(Deploy.local),
"reliableEndpointWriter-" + AddressUrlEncoder(remoteAddress) + "-" + endpointId.next()))
- else context.watch(context.actorOf(RARP(extendedSystem).configureDispatcher(EndpointWriter.props(
- handleOption,
- localAddress,
- remoteAddress,
- refuseUid,
- transport,
- endpointSettings,
- AkkaPduProtobufCodec,
- receiveBuffers,
- reliableDeliverySupervisor = None)).withDeploy(Deploy.local),
+ else context.watch(context.actorOf(
+ RARP(extendedSystem).configureDispatcher(EndpointWriter.props(
+ handleOption,
+ localAddress,
+ remoteAddress,
+ refuseUid,
+ transport,
+ endpointSettings,
+ AkkaPduProtobufCodec,
+ receiveBuffers,
+ reliableDeliverySupervisor = None)).withDeploy(Deploy.local),
"endpointWriter-" + AddressUrlEncoder(remoteAddress) + "-" + endpointId.next()))
}
diff --git a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
index f988447907..1f8350640d 100644
--- a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
@@ -26,9 +26,9 @@ sealed trait AssociationEvent extends RemotingLifecycleEvent {
@SerialVersionUID(1L)
final case class AssociatedEvent(
- localAddress: Address,
+ localAddress: Address,
remoteAddress: Address,
- inbound: Boolean)
+ inbound: Boolean)
extends AssociationEvent {
protected override def eventName: String = "Associated"
@@ -38,9 +38,9 @@ final case class AssociatedEvent(
@SerialVersionUID(1L)
final case class DisassociatedEvent(
- localAddress: Address,
+ localAddress: Address,
remoteAddress: Address,
- inbound: Boolean)
+ inbound: Boolean)
extends AssociationEvent {
protected override def eventName: String = "Disassociated"
override def logLevel: Logging.LogLevel = Logging.DebugLevel
@@ -48,11 +48,11 @@ final case class DisassociatedEvent(
@SerialVersionUID(1L)
final case class AssociationErrorEvent(
- cause: Throwable,
- localAddress: Address,
+ cause: Throwable,
+ localAddress: Address,
remoteAddress: Address,
- inbound: Boolean,
- logLevel: Logging.LogLevel) extends AssociationEvent {
+ inbound: Boolean,
+ logLevel: Logging.LogLevel) extends AssociationEvent {
protected override def eventName: String = "AssociationError"
override def toString: String = s"${super.toString}: Error [${cause.getMessage}] [${Logging.stackTraceFor(cause)}]"
def getCause: Throwable = cause
diff --git a/akka-remote/src/main/scala/akka/remote/security/provider/InternetSeedGenerator.scala b/akka-remote/src/main/scala/akka/remote/security/provider/InternetSeedGenerator.scala
index 0ad678cb43..de04e8f6bd 100644
--- a/akka-remote/src/main/scala/akka/remote/security/provider/InternetSeedGenerator.scala
+++ b/akka-remote/src/main/scala/akka/remote/security/provider/InternetSeedGenerator.scala
@@ -36,7 +36,8 @@ object InternetSeedGenerator {
private final val Instance: InternetSeedGenerator = new InternetSeedGenerator
/**Delegate generators. */
private final val Generators: immutable.Seq[SeedGenerator] =
- List(new RandomDotOrgSeedGenerator, // first try the Internet seed generator
+ List(
+ new RandomDotOrgSeedGenerator, // first try the Internet seed generator
new SecureRandomSeedGenerator) // this is last because it always works
}
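The list order encodes a fallback chain: try each seed source in turn and take the first that yields bytes, with the SecureRandom-backed generator last because it always succeeds. A sketch over a hypothetical trait:

trait SeedGen { def generateSeed(bytes: Int): Option[Array[Byte]] }

// lazily walk the chain and stop at the first generator that produces a seed
def firstSeed(gens: List[SeedGen], bytes: Int): Option[Array[Byte]] =
  gens.view.flatMap(_.generateSeed(bytes)).headOption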
diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala
index 625d6487d0..c2d3fa3ef6 100644
--- a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala
+++ b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala
@@ -68,8 +68,8 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
private val ActorIdentifyManifest = "B"
private val fromBinaryMap = Map[String, Array[Byte] ⇒ AnyRef](
- IdentifyManifest -> deserializeIdentify,
- ActorIdentifyManifest -> deserializeActorIdentity)
+ IdentifyManifest → deserializeIdentify,
+ ActorIdentifyManifest → deserializeActorIdentity)
override def manifest(o: AnyRef): String =
o match {
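The map above drives manifest-dispatched deserialization: each type is written with a manifest string, and fromBinary picks the matching decoder. A sketch with hypothetical placeholder decoders:

val decoders = Map[String, Array[Byte] ⇒ AnyRef](
  "A" → (bytes ⇒ new String(bytes, "UTF-8")),
  "B" → (bytes ⇒ Integer.valueOf(bytes.length)))

def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
  decoders.get(manifest) match {
    case Some(decode) ⇒ decode(bytes)
    case None         ⇒ throw new IllegalArgumentException(s"Unknown manifest [$manifest]")
  }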
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
index 548b5ae1bb..620c31b24b 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
@@ -27,7 +27,7 @@ class TransportAdapters(system: ExtendedActorSystem) extends Extension {
val settings = RARP(system).provider.remoteSettings
private val adaptersTable: Map[String, TransportAdapterProvider] = for ((name, fqn) ← settings.Adapters) yield {
- name -> system.dynamicAccess.createInstanceFor[TransportAdapterProvider](fqn, immutable.Seq.empty).recover({
+ name → system.dynamicAccess.createInstanceFor[TransportAdapterProvider](fqn, immutable.Seq.empty).recover({
case e ⇒ throw new IllegalArgumentException(s"Cannot instantiate transport adapter [${fqn}]", e)
}).get
}
@@ -68,8 +68,9 @@ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transpor
protected def maximumOverhead: Int
- protected def interceptListen(listenAddress: Address,
- listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener]
+ protected def interceptListen(
+ listenAddress: Address,
+ listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener]
protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit
@@ -116,14 +117,16 @@ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transpor
}
-abstract class AbstractTransportAdapterHandle(val originalLocalAddress: Address,
- val originalRemoteAddress: Address,
- val wrappedHandle: AssociationHandle,
- val addedSchemeIdentifier: String) extends AssociationHandle
+abstract class AbstractTransportAdapterHandle(
+ val originalLocalAddress: Address,
+ val originalRemoteAddress: Address,
+ val wrappedHandle: AssociationHandle,
+ val addedSchemeIdentifier: String) extends AssociationHandle
with SchemeAugmenter {
def this(wrappedHandle: AssociationHandle, addedSchemeIdentifier: String) =
- this(wrappedHandle.localAddress,
+ this(
+ wrappedHandle.localAddress,
wrappedHandle.remoteAddress,
wrappedHandle,
addedSchemeIdentifier)
@@ -138,8 +141,9 @@ object ActorTransportAdapter {
final case class ListenerRegistered(listener: AssociationEventListener) extends TransportOperation
final case class AssociateUnderlying(remoteAddress: Address, statusPromise: Promise[AssociationHandle]) extends TransportOperation
- final case class ListenUnderlying(listenAddress: Address,
- upstreamListener: Future[AssociationEventListener]) extends TransportOperation
+ final case class ListenUnderlying(
+ listenAddress: Address,
+ upstreamListener: Future[AssociationEventListener]) extends TransportOperation
final case class DisassociateUnderlying(info: DisassociateInfo = AssociationHandle.Unknown)
extends TransportOperation with DeadLetterSuppression
@@ -159,8 +163,9 @@ abstract class ActorTransportAdapter(wrappedTransport: Transport, system: ActorS
private def registerManager(): Future[ActorRef] =
(system.actorSelection("/system/transports") ? RegisterTransportActor(managerProps, managerName)).mapTo[ActorRef]
- override def interceptListen(listenAddress: Address,
- listenerPromise: Future[AssociationEventListener]): Future[AssociationEventListener] = {
+ override def interceptListen(
+ listenAddress: Address,
+ listenerPromise: Future[AssociationEventListener]): Future[AssociationEventListener] = {
registerManager().map { mgr ⇒
// Side effecting: storing the manager instance in volatile var
// This is done only once: during the initialization of the protocol stack. The variable manager is not read
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
index 8415e2eaaf..ed8e3d5ad7 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
@@ -34,11 +34,12 @@ private[remote] object AkkaPduCodec {
case object Heartbeat extends AkkaPdu
final case class Payload(bytes: ByteString) extends AkkaPdu
- final case class Message(recipient: InternalActorRef,
- recipientAddress: Address,
- serializedMessage: SerializedMessage,
- senderOption: Option[ActorRef],
- seqOption: Option[SeqNo]) extends HasSequenceNumber {
+ final case class Message(
+ recipient: InternalActorRef,
+ recipientAddress: Address,
+ serializedMessage: SerializedMessage,
+ senderOption: Option[ActorRef],
+ seqOption: Option[SeqNo]) extends HasSequenceNumber {
def reliableDeliveryEnabled = seqOption.isDefined
@@ -93,12 +94,12 @@ private[remote] trait AkkaPduCodec {
def decodeMessage(raw: ByteString, provider: RemoteActorRefProvider, localAddress: Address): (Option[Ack], Option[Message])
def constructMessage(
- localAddress: Address,
- recipient: ActorRef,
+ localAddress: Address,
+ recipient: ActorRef,
serializedMessage: SerializedMessage,
- senderOption: Option[ActorRef],
- seqOption: Option[SeqNo] = None,
- ackOption: Option[Ack] = None): ByteString
+ senderOption: Option[ActorRef],
+ seqOption: Option[SeqNo] = None,
+ ackOption: Option[Ack] = None): ByteString
def constructPureAck(ack: Ack): ByteString
}
@@ -117,12 +118,12 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
}
override def constructMessage(
- localAddress: Address,
- recipient: ActorRef,
+ localAddress: Address,
+ recipient: ActorRef,
serializedMessage: SerializedMessage,
- senderOption: Option[ActorRef],
- seqOption: Option[SeqNo] = None,
- ackOption: Option[Ack] = None): ByteString = {
+ senderOption: Option[ActorRef],
+ seqOption: Option[SeqNo] = None,
+ ackOption: Option[Ack] = None): ByteString = {
val ackAndEnvelopeBuilder = AckAndEnvelopeContainer.newBuilder
@@ -175,8 +176,8 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
}
override def decodeMessage(
- raw: ByteString,
- provider: RemoteActorRefProvider,
+ raw: ByteString,
+ provider: RemoteActorRefProvider,
localAddress: Address): (Option[Ack], Option[Message]) = {
val ackAndEnvelope = AckAndEnvelopeContainer.parseFrom(raw.toArray)
@@ -225,7 +226,7 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
Address(encodedAddress.getProtocol, encodedAddress.getSystem, encodedAddress.getHostname, encodedAddress.getPort)
private def constructControlMessagePdu(
- code: WireFormats.CommandType,
+ code: WireFormats.CommandType,
handshakeInfo: Option[AkkaHandshakeInfo.Builder]): ByteString = {
val controlMessageBuilder = AkkaControlMessage.newBuilder()
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
index 187237e8da..8a0ac30caa 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
@@ -51,7 +51,8 @@ private[remote] class AkkaProtocolSettings(config: Config) {
else if (enabledTransports.contains("akka.remote.netty.ssl"))
config.getMillisDuration("akka.remote.netty.ssl.connection-timeout")
else
- config.getMillisDuration("akka.remote.handshake-timeout").requiring(_ > Duration.Zero,
+ config.getMillisDuration("akka.remote.handshake-timeout").requiring(
+ _ > Duration.Zero,
"handshake-timeout must be > 0")
}
}
@@ -64,7 +65,7 @@ private[remote] object AkkaProtocolTransport { //Couldn't these go into the Remo
final case class AssociateUnderlyingRefuseUid(
remoteAddress: Address,
statusPromise: Promise[AssociationHandle],
- refuseUid: Option[Int]) extends NoSerializationVerificationNeeded
+ refuseUid: Option[Int]) extends NoSerializationVerificationNeeded
}
final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String])
@@ -93,10 +94,10 @@ final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String]
* the codec that will be used to encode/decode Akka PDUs
*/
private[remote] class AkkaProtocolTransport(
- wrappedTransport: Transport,
- private val system: ActorSystem,
+ wrappedTransport: Transport,
+ private val system: ActorSystem,
private val settings: AkkaProtocolSettings,
- private val codec: AkkaPduCodec) extends ActorTransportAdapter(wrappedTransport, system) {
+ private val codec: AkkaPduCodec) extends ActorTransportAdapter(wrappedTransport, system) {
override val addedSchemeIdentifier: String = AkkaScheme
@@ -122,7 +123,7 @@ private[remote] class AkkaProtocolTransport(
private[transport] class AkkaProtocolManager(
private val wrappedTransport: Transport,
- private val settings: AkkaProtocolSettings)
+ private val settings: AkkaProtocolSettings)
extends ActorTransportAdapterManager {
// The AkkaProtocolTransport does not handle the recovery of associations, this task is implemented in the
@@ -158,7 +159,7 @@ private[transport] class AkkaProtocolManager(
private def createOutboundStateActor(
remoteAddress: Address,
statusPromise: Promise[AssociationHandle],
- refuseUid: Option[Int]): Unit = {
+ refuseUid: Option[Int]): Unit = {
val stateActorLocalAddress = localAddress
val stateActorSettings = settings
@@ -181,13 +182,13 @@ private[transport] class AkkaProtocolManager(
}
private[remote] class AkkaProtocolHandle(
- _localAddress: Address,
- _remoteAddress: Address,
+ _localAddress: Address,
+ _remoteAddress: Address,
val readHandlerPromise: Promise[HandleEventListener],
- _wrappedHandle: AssociationHandle,
- val handshakeInfo: HandshakeInfo,
+ _wrappedHandle: AssociationHandle,
+ val handshakeInfo: HandshakeInfo,
private val stateActor: ActorRef,
- private val codec: AkkaPduCodec)
+ private val codec: AkkaPduCodec)
extends AbstractTransportAdapterHandle(_localAddress, _remoteAddress, _wrappedHandle, AkkaScheme) {
override def write(payload: ByteString): Boolean = wrappedHandle.write(codec.constructPayload(payload))
@@ -257,34 +258,35 @@ private[transport] object ProtocolStateActor {
case object ForbiddenUidReason
private[remote] def outboundProps(
- handshakeInfo: HandshakeInfo,
- remoteAddress: Address,
- statusPromise: Promise[AssociationHandle],
- transport: Transport,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
+ handshakeInfo: HandshakeInfo,
+ remoteAddress: Address,
+ statusPromise: Promise[AssociationHandle],
+ transport: Transport,
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
failureDetector: FailureDetector,
- refuseUid: Option[Int]): Props =
+ refuseUid: Option[Int]): Props =
Props(classOf[ProtocolStateActor], handshakeInfo, remoteAddress, statusPromise, transport, settings, codec,
failureDetector, refuseUid).withDeploy(Deploy.local)
private[remote] def inboundProps(
- handshakeInfo: HandshakeInfo,
- wrappedHandle: AssociationHandle,
+ handshakeInfo: HandshakeInfo,
+ wrappedHandle: AssociationHandle,
associationListener: AssociationEventListener,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
- failureDetector: FailureDetector): Props =
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
+ failureDetector: FailureDetector): Props =
Props(classOf[ProtocolStateActor], handshakeInfo, wrappedHandle, associationListener, settings, codec,
failureDetector).withDeploy(Deploy.local)
}
-private[transport] class ProtocolStateActor(initialData: InitialProtocolStateData,
- private val localHandshakeInfo: HandshakeInfo,
- private val refuseUid: Option[Int],
- private val settings: AkkaProtocolSettings,
- private val codec: AkkaPduCodec,
- private val failureDetector: FailureDetector)
+private[transport] class ProtocolStateActor(
+ initialData: InitialProtocolStateData,
+ private val localHandshakeInfo: HandshakeInfo,
+ private val refuseUid: Option[Int],
+ private val settings: AkkaProtocolSettings,
+ private val codec: AkkaPduCodec,
+ private val failureDetector: FailureDetector)
extends Actor with FSM[AssociationState, ProtocolStateData]
with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
@@ -292,24 +294,26 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
import context.dispatcher
// Outbound case
- def this(handshakeInfo: HandshakeInfo,
- remoteAddress: Address,
- statusPromise: Promise[AssociationHandle],
- transport: Transport,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
- failureDetector: FailureDetector,
- refuseUid: Option[Int]) = {
+ def this(
+ handshakeInfo: HandshakeInfo,
+ remoteAddress: Address,
+ statusPromise: Promise[AssociationHandle],
+ transport: Transport,
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
+ failureDetector: FailureDetector,
+ refuseUid: Option[Int]) = {
this(OutboundUnassociated(remoteAddress, statusPromise, transport), handshakeInfo, refuseUid, settings, codec, failureDetector)
}
// Inbound case
- def this(handshakeInfo: HandshakeInfo,
- wrappedHandle: AssociationHandle,
- associationListener: AssociationEventListener,
- settings: AkkaProtocolSettings,
- codec: AkkaPduCodec,
- failureDetector: FailureDetector) = {
+ def this(
+ handshakeInfo: HandshakeInfo,
+ wrappedHandle: AssociationHandle,
+ associationListener: AssociationEventListener,
+ settings: AkkaProtocolSettings,
+ codec: AkkaPduCodec,
+ failureDetector: FailureDetector) = {
this(InboundUnassociated(associationListener, wrappedHandle), handshakeInfo, refuseUid = None, settings, codec, failureDetector)
}
@@ -413,7 +417,8 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
immutable.Queue.empty)
} else {
if (log.isDebugEnabled)
- log.warning(s"Association attempt with mismatching cookie from [{}]. Expected [{}] but received [{}].",
+ log.warning(
+ s"Association attempt with mismatching cookie from [{}]. Expected [{}] but received [{}].",
info.origin, localHandshakeInfo.cookie.getOrElse(""), info.cookie.getOrElse(""))
else
log.warning(s"Association attempt with mismatching cookie from [{}].", info.origin)
@@ -581,9 +586,10 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
private def listenForListenerRegistration(readHandlerPromise: Promise[HandleEventListener]): Unit =
readHandlerPromise.future.map { HandleListenerRegistered(_) } pipeTo self
- private def notifyOutboundHandler(wrappedHandle: AssociationHandle,
- handshakeInfo: HandshakeInfo,
- statusPromise: Promise[AssociationHandle]): Future[HandleEventListener] = {
+ private def notifyOutboundHandler(
+ wrappedHandle: AssociationHandle,
+ handshakeInfo: HandshakeInfo,
+ statusPromise: Promise[AssociationHandle]): Future[HandleEventListener] = {
val readHandlerPromise = Promise[HandleEventListener]()
listenForListenerRegistration(readHandlerPromise)
@@ -599,9 +605,10 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
readHandlerPromise.future
}
- private def notifyInboundHandler(wrappedHandle: AssociationHandle,
- handshakeInfo: HandshakeInfo,
- associationListener: AssociationEventListener): Future[HandleEventListener] = {
+ private def notifyInboundHandler(
+ wrappedHandle: AssociationHandle,
+ handshakeInfo: HandshakeInfo,
+ associationListener: AssociationEventListener): Future[HandleEventListener] = {
val readHandlerPromise = Promise[HandleEventListener]()
listenForListenerRegistration(readHandlerPromise)
diff --git a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
index 6f367d1078..b439244415 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
@@ -77,8 +77,9 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor
case _ ⇒ wrappedTransport.managementCommand(cmd)
}
- protected def interceptListen(listenAddress: Address,
- listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = {
+ protected def interceptListen(
+ listenAddress: Address,
+ listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = {
log.warning("FailureInjectorTransport is active on this system. Gremlins might munch your packets.")
listenerFuture.onSuccess {
// Side effecting: As this class is not an actor, the only way to safely modify state is through volatile vars.
@@ -140,8 +141,9 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor
/**
* INTERNAL API
*/
-private[remote] final case class FailureInjectorHandle(_wrappedHandle: AssociationHandle,
- private val gremlinAdapter: FailureInjectorTransportAdapter)
+private[remote] final case class FailureInjectorHandle(
+ _wrappedHandle: AssociationHandle,
+ private val gremlinAdapter: FailureInjectorTransportAdapter)
extends AbstractTransportAdapterHandle(_wrappedHandle, FailureInjectorSchemeIdentifier)
with HandleEventListener {
import gremlinAdapter.extendedSystem.dispatcher
diff --git a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
index 8d5de1e414..d66ff291f5 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
@@ -24,10 +24,10 @@ import scala.concurrent.ExecutionContext.Implicits.global
* production systems.
*/
class TestTransport(
- val localAddress: Address,
- final val registry: AssociationRegistry,
- val maximumPayloadBytes: Int = 32000,
- val schemeIdentifier: String = "test") extends Transport {
+ val localAddress: Address,
+ final val registry: AssociationRegistry,
+ val maximumPayloadBytes: Int = 32000,
+ val schemeIdentifier: String = "test") extends Transport {
def this(system: ExtendedActorSystem, conf: Config) = {
this(
@@ -141,12 +141,12 @@ class TestTransport(
*/
val writeBehavior = new SwitchableLoggedBehavior[(TestAssociationHandle, ByteString), Boolean](
defaultBehavior = {
- defaultWrite _
- },
+ defaultWrite _
+ },
logCallback = {
- case (handle, payload) ⇒
- registry.logActivity(WriteAttempt(handle.localAddress, handle.remoteAddress, payload))
- })
+ case (handle, payload) ⇒
+ registry.logActivity(WriteAttempt(handle.localAddress, handle.remoteAddress, payload))
+ })
/**
* The [[akka.remote.transport.TestTransport.SwitchableLoggedBehavior]] for the disassociate() method on handles. All
@@ -154,12 +154,12 @@ class TestTransport(
*/
val disassociateBehavior = new SwitchableLoggedBehavior[TestAssociationHandle, Unit](
defaultBehavior = {
- defaultDisassociate _
- },
+ defaultDisassociate _
+ },
logCallback = {
- (handle) ⇒
- registry.logActivity(DisassociateAttempt(handle.localAddress, handle.remoteAddress))
- })
+ (handle) ⇒
+ registry.logActivity(DisassociateAttempt(handle.localAddress, handle.remoteAddress))
+ })
private[akka] def write(handle: TestAssociationHandle, payload: ByteString): Boolean =
Await.result(writeBehavior((handle, payload)), 3.seconds)
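The two reformatted argument lists above construct SwitchableLoggedBehavior values: function-like wrappers that log every invocation and then run whichever behavior is currently on top, with the default installed at the bottom. A minimal sketch of that shape, assuming the real class in TestTransport adds push/pop of canned failures and delays (the names below are illustrative, not the actual API):

import java.util.concurrent.CopyOnWriteArrayList
import scala.concurrent.Future

// Sketch only: logs each call, then delegates to the most recently pushed
// behavior; the default given at construction is never removed.
class LoggedBehaviorSketch[A, B](defaultBehavior: A => Future[B], logCallback: A => Unit)
  extends (A => Future[B]) {
  private val behaviors = new CopyOnWriteArrayList[A => Future[B]]()
  behaviors.add(defaultBehavior)

  def push(behavior: A => Future[B]): Unit = behaviors.add(behavior)

  def apply(params: A): Future[B] = {
    logCallback(params)
    behaviors.get(behaviors.size - 1)(params)
  }
}

This matches how the write path is used right above: the behavior is applied to a (handle, payload) tuple and the resulting Future[Boolean] is awaited.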
@@ -299,8 +299,9 @@ object TestTransport {
* @param listenerPair pair of listeners in initiator, receiver order.
* @return
*/
- def remoteListenerRelativeTo(handle: TestAssociationHandle,
- listenerPair: (HandleEventListener, HandleEventListener)): HandleEventListener = {
+ def remoteListenerRelativeTo(
+ handle: TestAssociationHandle,
+ listenerPair: (HandleEventListener, HandleEventListener)): HandleEventListener = {
listenerPair match {
case (initiator, receiver) ⇒ if (handle.inbound) initiator else receiver
}
@@ -448,10 +449,10 @@ object AssociationRegistry {
}
final case class TestAssociationHandle(
- localAddress: Address,
+ localAddress: Address,
remoteAddress: Address,
- transport: TestTransport,
- inbound: Boolean) extends AssociationHandle {
+ transport: TestTransport,
+ inbound: Boolean) extends AssociationHandle {
@volatile var writable = true
diff --git a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
index b24beb2b43..6e9d525e07 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
@@ -232,7 +232,7 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) extends A
val inMode = getInboundMode(naked)
wrappedHandle.outboundThrottleMode.set(getOutboundMode(naked))
wrappedHandle.readHandlerPromise.future map { ListenerAndMode(_, inMode) } pipeTo wrappedHandle.throttlerActor
- handleTable ::= naked -> wrappedHandle
+ handleTable ::= naked → wrappedHandle
statusPromise.success(wrappedHandle)
case SetThrottle(address, direction, mode) ⇒
val naked = nakedAddress(address)
@@ -259,7 +259,7 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) extends A
case Checkin(origin, handle) ⇒
val naked: Address = nakedAddress(origin)
- handleTable ::= naked -> handle
+ handleTable ::= naked → handle
setMode(naked, handle)
}
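These two hunks only swap -> for the → glyph, but the idiom is worth spelling out: a → b builds a pair exactly like a -> b, and x ::= y desugars to x = y :: x, so each Checkin prepends onto the handle table. A tiny standalone illustration:

object PrependDemo extends App {
  var handleTable = List.empty[(String, Int)]
  handleTable ::= "first" -> 1   // List((first,1))
  handleTable ::= "second" -> 2  // List((second,2), (first,1)); newest entry is seen first
  println(handleTable)
}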
@@ -360,10 +360,10 @@ private[transport] object ThrottledAssociation {
* INTERNAL API
*/
private[transport] class ThrottledAssociation(
- val manager: ActorRef,
+ val manager: ActorRef,
val associationHandler: AssociationEventListener,
- val originalHandle: AssociationHandle,
- val inbound: Boolean)
+ val originalHandle: AssociationHandle,
+ val inbound: Boolean)
extends Actor with LoggingFSM[ThrottledAssociation.ThrottlerState, ThrottledAssociation.ThrottlerData]
with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
import ThrottledAssociation._
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
index 15dbc9171f..4985efee79 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
@@ -167,10 +167,11 @@ private[netty] trait CommonHandlers extends NettyHelpers {
protected def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle
- protected def registerListener(channel: Channel,
- listener: HandleEventListener,
- msg: ChannelBuffer,
- remoteSocketAddress: InetSocketAddress): Unit
+ protected def registerListener(
+ channel: Channel,
+ listener: HandleEventListener,
+ msg: ChannelBuffer,
+ remoteSocketAddress: InetSocketAddress): Unit
final protected def init(channel: Channel, remoteSocketAddress: SocketAddress, remoteAddress: Address, msg: ChannelBuffer)(
op: (AssociationHandle ⇒ Any)): Unit = {
@@ -193,8 +194,9 @@ private[netty] trait CommonHandlers extends NettyHelpers {
/**
* INTERNAL API
*/
-private[netty] abstract class ServerHandler(protected final val transport: NettyTransport,
- private final val associationListenerFuture: Future[AssociationEventListener])
+private[netty] abstract class ServerHandler(
+ protected final val transport: NettyTransport,
+ private final val associationListenerFuture: Future[AssociationEventListener])
extends NettyServerHelpers with CommonHandlers {
import transport.executionContext
@@ -205,7 +207,7 @@ private[netty] abstract class ServerHandler(protected final val transport: Netty
case listener: AssociationEventListener ⇒
val remoteAddress = NettyTransport.addressFromSocketAddress(remoteSocketAddress, transport.schemeIdentifier,
transport.system.name, hostName = None, port = None).getOrElse(
- throw new NettyTransportException(s"Unknown inbound remote address type [${remoteSocketAddress.getClass.getName}]"))
+ throw new NettyTransportException(s"Unknown inbound remote address type [${remoteSocketAddress.getClass.getName}]"))
init(channel, remoteSocketAddress, remoteAddress, msg) { listener notify InboundAssociation(_) }
}
}
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
index 787c754906..26fe1f892d 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
@@ -28,10 +28,11 @@ private[remote] trait TcpHandlers extends CommonHandlers {
import ChannelLocalActor._
- override def registerListener(channel: Channel,
- listener: HandleEventListener,
- msg: ChannelBuffer,
- remoteSocketAddress: InetSocketAddress): Unit = ChannelLocalActor.set(channel, Some(listener))
+ override def registerListener(
+ channel: Channel,
+ listener: HandleEventListener,
+ msg: ChannelBuffer,
+ remoteSocketAddress: InetSocketAddress): Unit = ChannelLocalActor.set(channel, Some(listener))
override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle =
new TcpAssociationHandle(localAddress, remoteAddress, transport, channel)
@@ -75,10 +76,11 @@ private[remote] class TcpClientHandler(_transport: NettyTransport, remoteAddress
/**
* INTERNAL API
*/
-private[remote] class TcpAssociationHandle(val localAddress: Address,
- val remoteAddress: Address,
- val transport: NettyTransport,
- private val channel: Channel)
+private[remote] class TcpAssociationHandle(
+ val localAddress: Address,
+ val remoteAddress: Address,
+ val transport: NettyTransport,
+ private val channel: Channel)
extends AssociationHandle {
import transport.executionContext
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
index 7e17cd7987..37d0192f74 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
@@ -21,10 +21,11 @@ private[remote] trait UdpHandlers extends CommonHandlers {
override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle =
new UdpAssociationHandle(localAddress, remoteAddress, channel, transport)
- override def registerListener(channel: Channel,
- listener: HandleEventListener,
- msg: ChannelBuffer,
- remoteSocketAddress: InetSocketAddress): Unit = {
+ override def registerListener(
+ channel: Channel,
+ listener: HandleEventListener,
+ msg: ChannelBuffer,
+ remoteSocketAddress: InetSocketAddress): Unit = {
transport.udpConnectionTable.putIfAbsent(remoteSocketAddress, listener) match {
case null ⇒ listener notify InboundPayload(ByteString(msg.array()))
case oldReader ⇒
@@ -72,10 +73,11 @@ private[remote] class UdpClientHandler(_transport: NettyTransport, remoteAddress
/**
* INTERNAL API
*/
-private[remote] class UdpAssociationHandle(val localAddress: Address,
- val remoteAddress: Address,
- private val channel: Channel,
- private val transport: NettyTransport) extends AssociationHandle {
+private[remote] class UdpAssociationHandle(
+ val localAddress: Address,
+ val remoteAddress: Address,
+ private val channel: Channel,
+ private val transport: NettyTransport) extends AssociationHandle {
override val readHandlerPromise: Promise[HandleEventListener] = Promise()
diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
index 4b40c2208b..2da9207c72 100644
--- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
@@ -23,12 +23,12 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {
}
def createFailureDetector(
- threshold: Double = 8.0,
- maxSampleSize: Int = 1000,
- minStdDeviation: FiniteDuration = 100.millis,
+ threshold: Double = 8.0,
+ maxSampleSize: Int = 1000,
+ minStdDeviation: FiniteDuration = 100.millis,
acceptableLostDuration: FiniteDuration = Duration.Zero,
firstHeartbeatEstimate: FiniteDuration = 1.second,
- clock: Clock = FailureDetector.defaultClock) =
+ clock: Clock = FailureDetector.defaultClock) =
new PhiAccrualFailureDetector(
threshold,
maxSampleSize,
@@ -63,7 +63,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {
"return realistic phi values" in {
val fd = createFailureDetector()
- val test = TreeMap(0 -> 0.0, 500 -> 0.1, 1000 -> 0.3, 1200 -> 1.6, 1400 -> 4.7, 1600 -> 10.8, 1700 -> 15.3)
+ val test = TreeMap(0 → 0.0, 500 → 0.1, 1000 → 0.3, 1200 → 1.6, 1400 → 4.7, 1600 → 10.8, 1700 → 15.3)
for ((timeDiff, expectedPhi) ← test) {
fd.phi(timeDiff = timeDiff, mean = 1000.0, stdDeviation = 100.0) should ===(expectedPhi +- (0.1))
}
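The expected values in this table follow from the logistic approximation of the cumulative normal distribution used in phi accrual failure detection, phi = -log10(1 - F(timeDiff)). Below is a self-contained sketch that reproduces the table within its +-0.1 tolerance; the constants 1.5976 and 0.070566 come from the standard approximation and are assumed to match what PhiAccrualFailureDetector uses internally:

object PhiSketch extends App {
  // phi rises as timeDiff drifts past the observed mean heartbeat interval
  def phi(timeDiff: Double, mean: Double, stdDeviation: Double): Double = {
    val y = (timeDiff - mean) / stdDeviation
    val e = math.exp(-y * (1.5976 + 0.070566 * y * y))
    if (timeDiff > mean) -math.log10(e / (1.0 + e))
    else -math.log10(1.0 - 1.0 / (1.0 + e))
  }

  // Expected: 1000 -> ~0.3, 1200 -> ~1.6, 1400 -> ~4.7, 1600 -> ~10.8, 1700 -> ~15.3
  for (t <- Seq(0, 500, 1000, 1200, 1400, 1600, 1700))
    println(f"timeDiff=$t%4d  phi=${phi(t, 1000.0, 100.0)}%5.2f")
}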
@@ -81,7 +81,8 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {
"return phi based on guess when only one heartbeat" in {
val timeInterval = List[Long](0, 1000, 1000, 1000, 1000)
- val fd = createFailureDetector(firstHeartbeatEstimate = 1.seconds,
+ val fd = createFailureDetector(
+ firstHeartbeatEstimate = 1.seconds,
clock = fakeTimeGenerator(timeInterval))
fd.heartbeat()
diff --git a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala
index 4edee05bc1..8bb4256b47 100644
--- a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala
@@ -23,7 +23,7 @@ class DeadlineFailureDetectorSpec extends AkkaSpec {
def createFailureDetector(
acceptableLostDuration: FiniteDuration,
- clock: Clock = FailureDetector.defaultClock) =
+ clock: Clock = FailureDetector.defaultClock) =
new DeadlineFailureDetector(acceptableLostDuration, heartbeatInterval = 1.second)(clock = clock)
"mark node as monitored after a series of successful heartbeats" in {
diff --git a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
index a620de6726..4800b3c060 100644
--- a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
@@ -16,12 +16,12 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") {
}
def createFailureDetector(
- threshold: Double = 8.0,
- maxSampleSize: Int = 1000,
- minStdDeviation: FiniteDuration = 10.millis,
+ threshold: Double = 8.0,
+ maxSampleSize: Int = 1000,
+ minStdDeviation: FiniteDuration = 10.millis,
acceptableLostDuration: FiniteDuration = Duration.Zero,
firstHeartbeatEstimate: FiniteDuration = 1.second,
- clock: Clock = FailureDetector.defaultClock) =
+ clock: Clock = FailureDetector.defaultClock) =
new PhiAccrualFailureDetector(
threshold,
maxSampleSize,
@@ -29,12 +29,13 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") {
acceptableLostDuration,
firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock)
- def createFailureDetectorRegistry(threshold: Double = 8.0,
- maxSampleSize: Int = 1000,
- minStdDeviation: FiniteDuration = 10.millis,
- acceptableLostDuration: FiniteDuration = Duration.Zero,
- firstHeartbeatEstimate: FiniteDuration = 1.second,
- clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = {
+ def createFailureDetectorRegistry(
+ threshold: Double = 8.0,
+ maxSampleSize: Int = 1000,
+ minStdDeviation: FiniteDuration = 10.millis,
+ acceptableLostDuration: FiniteDuration = Duration.Zero,
+ firstHeartbeatEstimate: FiniteDuration = 1.second,
+ clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = {
new DefaultFailureDetectorRegistry[String](() ⇒ createFailureDetector(
threshold,
maxSampleSize,
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
index 5de9b1ad98..75f6781108 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
@@ -46,8 +46,8 @@ class RemoteConfigSpec extends AkkaSpec(
Transports.head._1 should ===(classOf[akka.remote.transport.netty.NettyTransport].getName)
Transports.head._2 should ===(Nil)
Adapters should ===(Map(
- "gremlin" -> classOf[akka.remote.transport.FailureInjectorProvider].getName,
- "trttl" -> classOf[akka.remote.transport.ThrottlerProvider].getName))
+ "gremlin" → classOf[akka.remote.transport.FailureInjectorProvider].getName,
+ "trttl" → classOf[akka.remote.transport.ThrottlerProvider].getName))
WatchFailureDetectorImplementationClass should ===(classOf[PhiAccrualFailureDetector].getName)
WatchHeartBeatInterval should ===(1 seconds)
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala
index d37e6bcafc..c574e94c21 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala
@@ -110,7 +110,8 @@ class RemoteRouterSpec extends AkkaSpec("""
"deploy its children on remote host driven by programatic definition" in {
val probe = TestProbe()(masterSystem)
- val router = masterSystem.actorOf(new RemoteRouterConfig(RoundRobinPool(2),
+ val router = masterSystem.actorOf(new RemoteRouterConfig(
+ RoundRobinPool(2),
Seq(Address("akka.tcp", sysName, "localhost", port))).props(echoActorProps), "blub2")
val replies = collectRouteePaths(probe, router, 5)
val children = replies.toSet
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala
index ac88c68859..23d1072c51 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala
@@ -40,7 +40,8 @@ object RemoteWatcherSpec {
final case class Quarantined(address: Address, uid: Option[Int])
}
- class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration) extends RemoteWatcher(createFailureDetector,
+ class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration) extends RemoteWatcher(
+ createFailureDetector,
heartbeatInterval = TurnOff,
unreachableReaperInterval = TurnOff,
heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) {
diff --git a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
index dbd188d377..4999a00548 100644
--- a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
@@ -140,9 +140,9 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D
for (
(name, proto) ← Seq(
- "/gonk" -> "tcp",
- "/zagzag" -> "udp",
- "/roghtaar" -> "ssl.tcp")
+ "/gonk" → "tcp",
+ "/zagzag" → "udp",
+ "/roghtaar" → "ssl.tcp")
) deploy(system, Deploy(name, scope = RemoteScope(addr(remoteSystem, proto))))
def addr(sys: ActorSystem, proto: String) =
diff --git a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala
index 27efdbfc03..07e4128027 100644
--- a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala
@@ -26,9 +26,9 @@ class MiscMessageSerializerSpec extends AkkaSpec(MiscMessageSerializerSpec.testC
"MiscMessageSerializer" must {
Seq(
- "Identify" -> Identify("some-message"),
- s"ActorIdentity without actor ref" -> ActorIdentity("some-message", ref = None),
- s"ActorIdentity with actor ref" -> ActorIdentity("some-message", ref = Some(testActor))).foreach {
+ "Identify" → Identify("some-message"),
+ s"ActorIdentity without actor ref" → ActorIdentity("some-message", ref = None),
+ s"ActorIdentity with actor ref" → ActorIdentity("some-message", ref = Some(testActor))).foreach {
case (scenario, item) ⇒
s"resolve serializer for $scenario" in {
val serializer = SerializationExtension(system)
diff --git a/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala b/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala
index a99fb5c3d3..b459d701bb 100644
--- a/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala
+++ b/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala
@@ -189,9 +189,11 @@ abstract class SystemMessageDeliveryStressTest(msg: String, cfg: String)
}
-class SystemMessageDeliveryRetryGate extends SystemMessageDeliveryStressTest("passive connections on",
+class SystemMessageDeliveryRetryGate extends SystemMessageDeliveryStressTest(
+ "passive connections on",
"akka.remote.retry-gate-closed-for = 0.5 s")
-class SystemMessageDeliveryNoPassiveRetryGate extends SystemMessageDeliveryStressTest("passive connections off",
+class SystemMessageDeliveryNoPassiveRetryGate extends SystemMessageDeliveryStressTest(
+ "passive connections off",
"""
akka.remote.use-passive-connections = off
akka.remote.retry-gate-closed-for = 0.5 s
diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
index 875b3d9053..68e0bb0e71 100644
--- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
+++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
@@ -32,7 +32,7 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig {
def nodeList = Seq(first, second, third)
// Extract individual sigar library for every node.
- nodeList foreach { role ⇒
+ nodeList foreach { role =>
nodeConfig(role) {
ConfigFactory.parseString(s"""
# Disable legacy metrics in akka-cluster.
diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
index 567ad2806b..e2bb09ab3e 100644
--- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
@@ -28,7 +28,7 @@ object StatsSampleSpecConfig extends MultiNodeConfig {
def nodeList = Seq(first, second, third)
// Extract individual sigar library for every node.
- nodeList foreach { role ⇒
+ nodeList foreach { role =>
nodeConfig(role) {
ConfigFactory.parseString(s"""
# Disable legacy metrics in akka-cluster.
@@ -131,4 +131,4 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig)
}
-}
\ No newline at end of file
+}
diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
index ea075f2ed0..331cbb8580 100644
--- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
@@ -27,7 +27,7 @@ object TransformationSampleSpecConfig extends MultiNodeConfig {
def nodeList = Seq(frontend1, frontend2, backend1, backend2, backend3)
// Extract individual sigar library for every node.
- nodeList foreach { role ⇒
+ nodeList foreach { role =>
nodeConfig(role) {
ConfigFactory.parseString(s"""
# Disable legacy metrics in akka-cluster.
@@ -138,4 +138,4 @@ abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSamp
}
}
-}
\ No newline at end of file
+}
diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
index 3bc3b2be21..e22a1a281f 100644
--- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
@@ -31,7 +31,7 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig {
def nodeList = Seq(first, second, third)
// Extract individual sigar library for every node.
- nodeList foreach { role ⇒
+ nodeList foreach { role =>
nodeConfig(role) {
ConfigFactory.parseString(s"""
# Disable legacy metrics in akka-cluster.
diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
index 9e80f02ccf..dba0965de2 100644
--- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
@@ -24,7 +24,7 @@ object StatsSampleSpecConfig extends MultiNodeConfig {
def nodeList = Seq(first, second, third)
// Extract individual sigar library for every node.
- nodeList foreach { role ⇒
+ nodeList foreach { role =>
nodeConfig(role) {
ConfigFactory.parseString(s"""
# Disable legacy metrics in akka-cluster.
@@ -147,4 +147,4 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig)
}
-}
\ No newline at end of file
+}
diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
index d21388b393..cd9324181c 100644
--- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
@@ -26,7 +26,7 @@ object TransformationSampleSpecConfig extends MultiNodeConfig {
def nodeList = Seq(frontend1, frontend2, backend1, backend2, backend3)
// Extract individual sigar library for every node.
- nodeList foreach { role ⇒
+ nodeList foreach { role =>
nodeConfig(role) {
ConfigFactory.parseString(s"""
# Disable legacy metrics in akka-cluster.
@@ -137,4 +137,4 @@ abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSamp
}
}
-}
\ No newline at end of file
+}
diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala
index 9efcf2f63a..b7f13f9e1b 100644
--- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala
+++ b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala
@@ -28,7 +28,7 @@ object ServiceRegistrySpec extends MultiNodeConfig {
class Service extends Actor {
def receive = {
- case s: String ⇒ sender() ! self.path.name + ": " + s
+ case s: String => sender() ! self.path.name + ": " + s
}
}
diff --git a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala
index 37c62cfe7a..6ae3471c49 100644
--- a/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala
+++ b/akka-samples/akka-sample-distributed-data-java/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala
@@ -84,7 +84,7 @@ class VotingServiceSpec extends MultiNodeSpec(VotingServiceSpec) with STMultiNod
votingService ! VotingService.CLOSE
}
- val expected = (1 to 20).map(n ⇒ "#" + n -> BigInteger.valueOf(3L * N / 20)).toMap
+ val expected = (1 to 20).map(n => "#" + n -> BigInteger.valueOf(3L * N / 20)).toMap
awaitAssert {
votingService ! VotingService.GET_VOTES
val votes = expectMsgType[Votes](3.seconds)
diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedCache.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedCache.scala
index 3187449bfa..bc53f12bf5 100644
--- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedCache.scala
+++ b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedCache.scala
@@ -32,22 +32,22 @@ class ReplicatedCache extends Actor {
LWWMapKey("cache-" + math.abs(entryKey.hashCode) % 100)
def receive = {
- case PutInCache(key, value) ⇒
+ case PutInCache(key, value) =>
replicator ! Update(dataKey(key), LWWMap(), WriteLocal)(_ + (key -> value))
- case Evict(key) ⇒
+ case Evict(key) =>
replicator ! Update(dataKey(key), LWWMap(), WriteLocal)(_ - key)
- case GetFromCache(key) ⇒
+ case GetFromCache(key) =>
replicator ! Get(dataKey(key), ReadLocal, Some(Request(key, sender())))
- case g @ GetSuccess(LWWMapKey(_), Some(Request(key, replyTo))) ⇒
+ case g @ GetSuccess(LWWMapKey(_), Some(Request(key, replyTo))) =>
g.dataValue match {
- case data: LWWMap[_] ⇒ data.get(key) match {
- case Some(value) ⇒ replyTo ! Cached(key, Some(value))
- case None ⇒ replyTo ! Cached(key, None)
+ case data: LWWMap[_] => data.get(key) match {
+ case Some(value) => replyTo ! Cached(key, Some(value))
+ case None => replyTo ! Cached(key, None)
}
}
- case NotFound(_, Some(Request(key, replyTo))) ⇒
+ case NotFound(_, Some(Request(key, replyTo))) =>
replyTo ! Cached(key, None)
- case _: UpdateResponse[_] ⇒ // ok
+ case _: UpdateResponse[_] => // ok
}
}
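After the arrow cleanup, ReplicatedCache still reads as a last-writer-wins cache sharded across 100 LWWMap keys by entry-key hash. A hedged usage sketch follows; the message types are the ones matched above and are assumed to live on a ReplicatedCache companion object, and a node with the distributed-data Replicator is assumed to be running:

import akka.actor.{ ActorSystem, Props }
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

object ReplicatedCacheDemo extends App {
  implicit val timeout: Timeout = Timeout(3.seconds)
  val system = ActorSystem("demo")
  val cache = system.actorOf(Props[ReplicatedCache], "cache")

  cache ! ReplicatedCache.PutInCache("key-1", "value-1")
  // GetFromCache replies to sender(), so ask is used here to capture Cached(...)
  (cache ? ReplicatedCache.GetFromCache("key-1")).foreach(println)(system.dispatcher)
}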
diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedMetrics.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedMetrics.scala
index ce056545ef..7c40e1180e 100644
--- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedMetrics.scala
+++ b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ReplicatedMetrics.scala
@@ -30,7 +30,7 @@ object ReplicatedMetrics {
case class UsedHeap(percentPerNode: Map[String, Double]) {
override def toString =
percentPerNode.toSeq.sortBy(_._1).map {
- case (key, value) ⇒ key + " --> " + value + " %"
+ case (key, value) => key + " --> " + value + " %"
}.mkString("\n")
}
@@ -71,42 +71,42 @@ class ReplicatedMetrics(measureInterval: FiniteDuration, cleanupInterval: Finite
var nodesInCluster = Set.empty[String]
def receive = {
- case Tick ⇒
+ case Tick =>
val heap = memoryMBean.getHeapMemoryUsage
val used = heap.getUsed
val max = heap.getMax
replicator ! Update(UsedHeapKey, LWWMap.empty[Long], WriteLocal)(_ + (node -> used))
- replicator ! Update(MaxHeapKey, LWWMap.empty[Long], WriteLocal) { data ⇒
+ replicator ! Update(MaxHeapKey, LWWMap.empty[Long], WriteLocal) { data =>
data.get(node) match {
- case Some(`max`) ⇒ data // unchanged
- case _ ⇒ data + (node -> max)
+ case Some(`max`) => data // unchanged
+ case _ => data + (node -> max)
}
}
- case c @ Changed(MaxHeapKey) ⇒
+ case c @ Changed(MaxHeapKey) =>
maxHeap = c.get(MaxHeapKey).entries
- case c @ Changed(UsedHeapKey) ⇒
+ case c @ Changed(UsedHeapKey) =>
val usedHeapPercent = UsedHeap(c.get(UsedHeapKey).entries.collect {
- case (key, value) if maxHeap.contains(key) ⇒
+ case (key, value) if maxHeap.contains(key) =>
(key -> (value.toDouble / maxHeap(key)) * 100.0)
})
log.debug("Node {} observed:\n{}", node, usedHeapPercent)
context.system.eventStream.publish(usedHeapPercent)
- case _: UpdateResponse[_] ⇒ // ok
+ case _: UpdateResponse[_] => // ok
- case MemberUp(m) ⇒
+ case MemberUp(m) =>
nodesInCluster += nodeKey(m.address)
- case MemberRemoved(m, _) ⇒
+ case MemberRemoved(m, _) =>
nodesInCluster -= nodeKey(m.address)
if (m.address == cluster.selfAddress)
context.stop(self)
- case Cleanup ⇒
+ case Cleanup =>
def cleanupRemoved(data: LWWMap[Long]): LWWMap[Long] =
- (data.entries.keySet -- nodesInCluster).foldLeft(data) { case (d, key) ⇒ d - key }
+ (data.entries.keySet -- nodesInCluster).foldLeft(data) { case (d, key) => d - key }
replicator ! Update(UsedHeapKey, LWWMap.empty[Long], WriteLocal)(cleanupRemoved)
replicator ! Update(MaxHeapKey, LWWMap.empty[Long], WriteLocal)(cleanupRemoved)
diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ServiceRegistry.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ServiceRegistry.scala
index 4d6c91df1e..dfa3722257 100644
--- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ServiceRegistry.scala
+++ b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ServiceRegistry.scala
@@ -69,7 +69,7 @@ class ServiceRegistry extends Actor with ActorLogging {
}
def receive = {
- case Register(name, service) ⇒
+ case Register(name, service) =>
val dKey = serviceKey(name)
// store the service names in a separate GSet to be able to
// get notifications of new names
@@ -78,19 +78,19 @@ class ServiceRegistry extends Actor with ActorLogging {
// add the service
replicator ! Update(dKey, ORSet(), WriteLocal)(_ + service)
- case Lookup(name) ⇒
+ case Lookup(name) =>
sender() ! Bindings(name, services.getOrElse(name, Set.empty))
- case c @ Changed(AllServicesKey) ⇒
+ case c @ Changed(AllServicesKey) =>
val newKeys = c.get(AllServicesKey).elements
log.debug("Services changed, added: {}, all: {}", (newKeys -- keys), newKeys)
- (newKeys -- keys).foreach { dKey ⇒
+ (newKeys -- keys).foreach { dKey =>
// subscribe to get notifications of when services with this name are added or removed
replicator ! Subscribe(dKey, self)
}
keys = newKeys
- case c @ Changed(ServiceKey(serviceName)) ⇒
+ case c @ Changed(ServiceKey(serviceName)) =>
val name = serviceName.split(":").tail.mkString
val newServices = c.get(serviceKey(name)).elements
log.debug("Services changed for name [{}]: {}", name, newServices)
@@ -99,7 +99,7 @@ class ServiceRegistry extends Actor with ActorLogging {
if (leader)
newServices.foreach(context.watch) // watch is idempotent
- case LeaderChanged(node) ⇒
+ case LeaderChanged(node) =>
// Let one node (the leader) be responsible for removal of terminated services
// to avoid redundant work and too many death watch notifications.
// It is not critical to only do it from one node.
@@ -114,14 +114,14 @@ class ServiceRegistry extends Actor with ActorLogging {
for (refs ← services.valuesIterator; ref ← refs)
context.unwatch(ref)
- case Terminated(ref) ⇒
- val names = services.collect { case (name, refs) if refs.contains(ref) ⇒ name }
- names.foreach { name ⇒
+ case Terminated(ref) =>
+ val names = services.collect { case (name, refs) if refs.contains(ref) => name }
+ names.foreach { name =>
log.debug("Service with name [{}] terminated: {}", name, ref)
replicator ! Update(serviceKey(name), ORSet(), WriteLocal)(_ - ref)
}
- case _: UpdateResponse[_] ⇒ // ok
+ case _: UpdateResponse[_] => // ok
}
}
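The registry above keeps one ORSet per service name plus a GSet of all names, so subscribers first learn about new names and then subscribe to each name's set, while only the cluster leader watches refs to prune terminated services. A hedged usage sketch, assuming the Register/Lookup/Bindings messages sit on a ServiceRegistry companion object and the node is a cluster member:

import akka.actor.{ Actor, ActorSystem, Props }

object ServiceRegistryDemo extends App {
  val system = ActorSystem("demo") // cluster + Replicator assumed configured
  val registry = system.actorOf(Props[ServiceRegistry], "serviceRegistry")
  val echo = system.actorOf(Props(new Actor { def receive = { case m => sender() ! m } }))

  registry ! ServiceRegistry.Register("echo", echo) // adds echo to the "echo" ORSet
  registry ! ServiceRegistry.Lookup("echo")         // Bindings("echo", refs) goes to sender()
}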
diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala
index 89d94fbda5..a695e5d637 100644
--- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala
+++ b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/ShoppingCart.scala
@@ -45,18 +45,18 @@ class ShoppingCart(userId: String) extends Actor {
//#get-cart
def receiveGetCart: Receive = {
- case GetCart ⇒
+ case GetCart =>
replicator ! Get(DataKey, readMajority, Some(sender()))
- case g @ GetSuccess(DataKey, Some(replyTo: ActorRef)) ⇒
+ case g @ GetSuccess(DataKey, Some(replyTo: ActorRef)) =>
val data = g.get(DataKey)
val cart = Cart(data.entries.values.toSet)
replyTo ! cart
- case NotFound(DataKey, Some(replyTo: ActorRef)) ⇒
+ case NotFound(DataKey, Some(replyTo: ActorRef)) =>
replyTo ! Cart(Set.empty)
- case GetFailure(DataKey, Some(replyTo: ActorRef)) ⇒
+ case GetFailure(DataKey, Some(replyTo: ActorRef)) =>
// ReadMajority failure, try again with local read
replicator ! Get(DataKey, ReadLocal, Some(replyTo))
}
@@ -64,9 +64,9 @@ class ShoppingCart(userId: String) extends Actor {
//#add-item
def receiveAddItem: Receive = {
- case cmd @ AddItem(item) ⇒
+ case cmd @ AddItem(item) =>
val update = Update(DataKey, LWWMap.empty[LineItem], writeMajority, Some(cmd)) {
- cart ⇒ updateCart(cart, item)
+ cart => updateCart(cart, item)
}
replicator ! update
}
@@ -74,38 +74,38 @@ class ShoppingCart(userId: String) extends Actor {
def updateCart(data: LWWMap[LineItem], item: LineItem): LWWMap[LineItem] =
data.get(item.productId) match {
- case Some(LineItem(_, _, existingQuantity)) ⇒
+ case Some(LineItem(_, _, existingQuantity)) =>
data + (item.productId -> item.copy(quantity = existingQuantity + item.quantity))
- case None ⇒ data + (item.productId -> item)
+ case None => data + (item.productId -> item)
}
//#remove-item
def receiveRemoveItem: Receive = {
- case cmd @ RemoveItem(productId) ⇒
+ case cmd @ RemoveItem(productId) =>
// Try to fetch latest from a majority of nodes first, since ORMap
// remove must have seen the item to be able to remove it.
replicator ! Get(DataKey, readMajority, Some(cmd))
- case GetSuccess(DataKey, Some(RemoveItem(productId))) ⇒
+ case GetSuccess(DataKey, Some(RemoveItem(productId))) =>
replicator ! Update(DataKey, LWWMap(), writeMajority, None) {
_ - productId
}
- case GetFailure(DataKey, Some(RemoveItem(productId))) ⇒
+ case GetFailure(DataKey, Some(RemoveItem(productId))) =>
// ReadMajority failed, fall back to best effort local value
replicator ! Update(DataKey, LWWMap(), writeMajority, None) {
_ - productId
}
- case NotFound(DataKey, Some(RemoveItem(productId))) ⇒
+ case NotFound(DataKey, Some(RemoveItem(productId))) =>
// nothing to remove
}
//#remove-item
def receiveOther: Receive = {
- case _: UpdateSuccess[_] | _: UpdateTimeout[_] ⇒
+ case _: UpdateSuccess[_] | _: UpdateTimeout[_] =>
// UpdateTimeout, will eventually be replicated
- case e: UpdateFailure[_] ⇒ throw new IllegalStateException("Unexpected failure: " + e)
+ case e: UpdateFailure[_] => throw new IllegalStateException("Unexpected failure: " + e)
}
}
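The remove path above is the instructive part: ORMap-based removal must causally have seen the entry it removes, so RemoveItem first reads with readMajority and only falls back to the best-effort local value if that read fails. A hedged usage sketch; the LineItem field names (productId, title, quantity) are inferred from the pattern match and copy call above, and a configured cluster node is assumed:

import akka.actor.{ ActorSystem, Props }

object ShoppingCartDemo extends App {
  val system = ActorSystem("demo") // cluster + Replicator assumed configured
  val cart = system.actorOf(Props(new ShoppingCart("customer-42")))

  cart ! ShoppingCart.AddItem(ShoppingCart.LineItem("p1", "Apples", quantity = 2))
  cart ! ShoppingCart.AddItem(ShoppingCart.LineItem("p1", "Apples", quantity = 1)) // updateCart merges to quantity 3
  cart ! ShoppingCart.RemoveItem("p1") // read-majority first, then remove
  cart ! ShoppingCart.GetCart          // Cart(items) is sent to sender()
}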
diff --git a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/VotingService.scala b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/VotingService.scala
index 22e9c8bee4..0d34d9002d 100644
--- a/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/VotingService.scala
+++ b/akka-samples/akka-sample-distributed-data-scala/src/main/scala/sample/distributeddata/VotingService.scala
@@ -35,14 +35,14 @@ class VotingService extends Actor {
replicator ! Subscribe(OpenedKey, self)
def receive = {
- case Open ⇒
+ case Open =>
replicator ! Update(OpenedKey, Flag(), WriteAll(5.seconds))(_.switchOn)
becomeOpen()
- case c @ Changed(OpenedKey) if c.get(OpenedKey).enabled ⇒
+ case c @ Changed(OpenedKey) if c.get(OpenedKey).enabled =>
becomeOpen()
- case GetVotes ⇒
+ case GetVotes =>
sender() ! Votes(Map.empty, open = false)
}
@@ -53,36 +53,36 @@ class VotingService extends Actor {
}
def open: Receive = {
- case v @ Vote(participant) ⇒
+ case v @ Vote(participant) =>
val update = Update(CountersKey, PNCounterMap(), WriteLocal, request = Some(v)) {
_.increment(participant, 1)
}
replicator ! update
- case _: UpdateSuccess[_] ⇒
+ case _: UpdateSuccess[_] =>
- case Close ⇒
+ case Close =>
replicator ! Update(ClosedKey, Flag(), WriteAll(5.seconds))(_.switchOn)
context.become(getVotes(open = false))
- case c @ Changed(ClosedKey) if c.get(ClosedKey).enabled ⇒
+ case c @ Changed(ClosedKey) if c.get(ClosedKey).enabled =>
context.become(getVotes(open = false))
}
def getVotes(open: Boolean): Receive = {
- case GetVotes ⇒
+ case GetVotes =>
replicator ! Get(CountersKey, ReadAll(3.seconds), Some(GetVotesReq(sender())))
- case g @ GetSuccess(CountersKey, Some(GetVotesReq(replyTo))) ⇒
+ case g @ GetSuccess(CountersKey, Some(GetVotesReq(replyTo))) =>
val data = g.get(CountersKey)
replyTo ! Votes(data.entries, open)
- case NotFound(CountersKey, Some(GetVotesReq(replyTo))) ⇒
+ case NotFound(CountersKey, Some(GetVotesReq(replyTo))) =>
replyTo ! Votes(Map.empty, open)
- case _: GetFailure[_] ⇒
+ case _: GetFailure[_] =>
- case _: UpdateSuccess[_] ⇒
+ case _: UpdateSuccess[_] =>
}
}
diff --git a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala
index 5be54aa3ad..8f216849bc 100644
--- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala
+++ b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/ServiceRegistrySpec.scala
@@ -27,7 +27,7 @@ object ServiceRegistrySpec extends MultiNodeConfig {
class Service extends Actor {
def receive = {
- case s: String ⇒ sender() ! self.path.name + ": " + s
+ case s: String => sender() ! self.path.name + ": " + s
}
}
diff --git a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala
index 339982e460..693d10ad42 100644
--- a/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala
+++ b/akka-samples/akka-sample-distributed-data-scala/src/multi-jvm/scala/sample/distributeddata/VotingServiceSpec.scala
@@ -72,7 +72,7 @@ class VotingServiceSpec extends MultiNodeSpec(VotingServiceSpec) with STMultiNod
val p = TestProbe()
awaitAssert {
votingService.tell(GetVotes, p.ref)
- p.expectMsgPF(3.seconds) { case Votes(_, true) ⇒ true }
+ p.expectMsgPF(3.seconds) { case Votes(_, true) => true }
}
for (n ← 1 to N) {
votingService ! Vote("#" + ((n % 20) + 1))
@@ -83,7 +83,7 @@ class VotingServiceSpec extends MultiNodeSpec(VotingServiceSpec) with STMultiNod
votingService ! Close
}
- val expected = (1 to 20).map(n ⇒ "#" + n -> BigInt(3L * N / 20)).toMap
+ val expected = (1 to 20).map(n => "#" + n -> BigInt(3L * N / 20)).toMap
awaitAssert {
votingService ! GetVotes
expectMsg(3.seconds, Votes(expected, false))
diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala
index 4e299e3b04..e10d1a919f 100644
--- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala
+++ b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala
@@ -96,7 +96,7 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft
}
"put custom MDC values when specified" in {
- producer ! StringWithMDC("Message with custom MDC values", Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values"))
+ producer ! StringWithMDC("Message with custom MDC values", Map("ticketNumber" → 3671, "ticketDesc" → "Custom MDC Values"))
awaitCond(outputString.contains("----"), 5 seconds)
val s = outputString
@@ -109,7 +109,7 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft
}
"Support null values in custom MDC" in {
- producer ! StringWithMDC("Message with null custom MDC values", Map("ticketNumber" -> 3671, "ticketDesc" -> null))
+ producer ! StringWithMDC("Message with null custom MDC values", Map("ticketNumber" → 3671, "ticketDesc" → null))
awaitCond(outputString.contains("----"), 5 seconds)
val s = outputString
diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala
index e809752844..6db3b858e3 100644
--- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala
+++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala
@@ -16,12 +16,14 @@ object GraphStageMessages {
}
object TestSinkStage {
- def apply[T, M](stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M],
- probe: TestProbe) = new TestSinkStage(stageUnderTest, probe)
+ def apply[T, M](
+ stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M],
+ probe: TestProbe) = new TestSinkStage(stageUnderTest, probe)
}
-private[testkit] class TestSinkStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M],
- probe: TestProbe)
+private[testkit] class TestSinkStage[T, M](
+ stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M],
+ probe: TestProbe)
extends GraphStageWithMaterializedValue[SinkShape[T], M] {
val in = Inlet[T]("testSinkStage.in")
@@ -51,12 +53,14 @@ private[testkit] class TestSinkStage[T, M](stageUnderTest: GraphStageWithMateria
}
object TestSourceStage {
- def apply[T, M](stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M],
- probe: TestProbe) = Source.fromGraph(new TestSourceStage(stageUnderTest, probe))
+ def apply[T, M](
+ stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M],
+ probe: TestProbe) = Source.fromGraph(new TestSourceStage(stageUnderTest, probe))
}
-private[testkit] class TestSourceStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M],
- probe: TestProbe)
+private[testkit] class TestSourceStage[T, M](
+ stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M],
+ probe: TestProbe)
extends GraphStageWithMaterializedValue[SourceShape[T], M] {
val out = Outlet[T]("testSourceStage.out")
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala
index 17df41f66f..fdea503048 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala
@@ -8,18 +8,19 @@ import org.reactivestreams.Publisher
import akka.stream.ActorMaterializer
class ChainSetup[In, Out, M](
- stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M],
+ stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M],
val settings: ActorMaterializerSettings,
materializer: ActorMaterializer,
- toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit val system: ActorSystem) {
+ toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit val system: ActorSystem) {
def this(stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], settings: ActorMaterializerSettings, toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit system: ActorSystem) =
this(stream, settings, ActorMaterializer(settings)(system), toPublisher)(system)
- def this(stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M],
- settings: ActorMaterializerSettings,
- materializerCreator: (ActorMaterializerSettings, ActorRefFactory) ⇒ ActorMaterializer,
- toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit system: ActorSystem) =
+ def this(
+ stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M],
+ settings: ActorMaterializerSettings,
+ materializerCreator: (ActorMaterializerSettings, ActorRefFactory) ⇒ ActorMaterializer,
+ toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit system: ActorSystem) =
this(stream, settings, materializerCreator(settings, system), toPublisher)(system)
val upstream = TestPublisher.manualProbe[In]()
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala
index e86a967bd6..086ac50751 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala
@@ -79,7 +79,7 @@ object Coroner { // FIXME: remove once going back to project dependencies
*/
def watch(duration: FiniteDuration, reportTitle: String, out: PrintStream,
startAndStopDuration: FiniteDuration = defaultStartAndStopDuration,
- displayThreadCounts: Boolean = false): WatchHandle = {
+ displayThreadCounts: Boolean = false): WatchHandle = {
val watchedHandle = new WatchHandleImpl(startAndStopDuration)
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala
index 3ee5815319..db2b5fe5f2 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala
@@ -40,13 +40,13 @@ trait ScriptedTest extends Matchers {
}
final class Script[In, Out](
- val providedInputs: Vector[In],
+ val providedInputs: Vector[In],
val expectedOutputs: Vector[Out],
- val jumps: Vector[Int],
- val inputCursor: Int,
- val outputCursor: Int,
+ val jumps: Vector[Int],
+ val inputCursor: Int,
+ val outputCursor: Int,
val outputEndCursor: Int,
- val completed: Boolean) {
+ val completed: Boolean) {
require(jumps.size == providedInputs.size)
def provideInput: (In, Script[In, Out]) =
@@ -88,12 +88,12 @@ trait ScriptedTest extends Matchers {
}
class ScriptRunner[In, Out, M](
- op: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M],
- settings: ActorMaterializerSettings,
- script: Script[In, Out],
+ op: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M],
+ settings: ActorMaterializerSettings,
+ script: Script[In, Out],
maximumOverrun: Int,
maximumRequest: Int,
- maximumBuffer: Int)(implicit _system: ActorSystem)
+ maximumBuffer: Int)(implicit _system: ActorSystem)
extends ChainSetup(op, settings, toPublisher) {
var _debugLog = Vector.empty[String]
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala
index 6799addf66..60aa742c4c 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala
@@ -25,7 +25,8 @@ private[akka] final case class StreamTestDefaultMailbox() extends MailboxType wi
val actorClass = r.underlying.props.actorClass
assert(actorClass != classOf[Actor], s"Don't use anonymous actor classes, actor class for $r was [${actorClass.getName}]")
// StreamTcpManager is allowed to use another dispatcher
- assert(!actorClass.getName.startsWith("akka.stream."),
+ assert(
+ !actorClass.getName.startsWith("akka.stream."),
s"$r with actor class [${actorClass.getName}] must not run on default dispatcher in tests. " +
"Did you forget to define `props.withDispatcher` when creating the actor? " +
"Or did you forget to configure the `akka.stream.materializer` setting accordingly or force the " +
diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala
index 214e068876..4b6131d381 100644
--- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala
+++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala
@@ -28,7 +28,8 @@ object Utils {
try probe.awaitAssert {
impl.supervisor.tell(StreamSupervisor.GetChildren, probe.ref)
children = probe.expectMsgType[StreamSupervisor.Children].children
- assert(children.isEmpty,
+ assert(
+ children.isEmpty,
s"expected no StreamSupervisor children, but got [${children.mkString(", ")}]")
}
catch {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala
index 6982788c0d..a4ee8188a0 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala
@@ -41,15 +41,15 @@ class DslConsistencySpec extends WordSpec with Matchers {
val graphHelpers = Set("zipGraph", "zipWithGraph", "mergeGraph", "mergeSortedGraph", "interleaveGraph", "concatGraph", "prependGraph", "alsoToGraph")
val allowMissing: Map[Class[_], Set[String]] = Map(
- jFlowClass -> graphHelpers,
- jSourceClass -> graphHelpers,
+ jFlowClass → graphHelpers,
+ jSourceClass → graphHelpers,
// Java subflows can only be nested using .via and .to (due to type system restrictions)
- jSubFlowClass -> (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow")),
- jSubSourceClass -> (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow")),
- sFlowClass -> Set("of"),
- sSourceClass -> Set("adapt", "from"),
- sSinkClass -> Set("adapt"),
- sRunnableGraphClass -> Set("builder"))
+ jSubFlowClass → (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow")),
+ jSubSourceClass → (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow")),
+ sFlowClass → Set("of"),
+ sSourceClass → Set("adapt", "from"),
+ sSinkClass → Set("adapt"),
+ sRunnableGraphClass → Set("builder"))
def materializing(m: Method): Boolean = m.getParameterTypes.contains(classOf[ActorMaterializer])
@@ -61,12 +61,12 @@ class DslConsistencySpec extends WordSpec with Matchers {
"Java and Scala DSLs" must {
- ("Source" -> List[Class[_]](sSourceClass, jSourceClass)) ::
- ("SubSource" -> List[Class[_]](sSubSourceClass, jSubSourceClass)) ::
- ("Flow" -> List[Class[_]](sFlowClass, jFlowClass)) ::
- ("SubFlow" -> List[Class[_]](sSubFlowClass, jSubFlowClass)) ::
- ("Sink" -> List[Class[_]](sSinkClass, jSinkClass)) ::
- ("RunanbleFlow" -> List[Class[_]](sRunnableGraphClass, jRunnableGraphClass)) ::
+ ("Source" → List[Class[_]](sSourceClass, jSourceClass)) ::
+ ("SubSource" → List[Class[_]](sSubSourceClass, jSubSourceClass)) ::
+ ("Flow" → List[Class[_]](sFlowClass, jFlowClass)) ::
+ ("SubFlow" → List[Class[_]](sSubFlowClass, jSubFlowClass)) ::
+ ("Sink" → List[Class[_]](sSinkClass, jSinkClass)) ::
+ ("RunanbleFlow" → List[Class[_]](sRunnableGraphClass, jRunnableGraphClass)) ::
Nil foreach {
case (element, classes) ⇒
diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala
index 0d274b6624..ae3265f85c 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala
@@ -17,12 +17,12 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers {
Set("adapt") // the scaladsl -> javadsl bridge
val `scala -> java aliases` =
- ("apply" -> "create") ::
- ("apply" -> "of") ::
- ("apply" -> "from") ::
- ("apply" -> "fromGraph") ::
- ("apply" -> "fromIterator") ::
- ("apply" -> "fromFunctions") ::
+ ("apply" → "create") ::
+ ("apply" → "of") ::
+ ("apply" → "from") ::
+ ("apply" → "fromGraph") ::
+ ("apply" → "fromIterator") ::
+ ("apply" → "fromFunctions") ::
Nil
// format: OFF
diff --git a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala
index 7553abe07c..39fc26a45d 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala
@@ -21,7 +21,7 @@ class FusingSpec extends AkkaSpec {
implicit val materializer = ActorMaterializer()
def graph(async: Boolean) =
- Source.unfold(1)(x ⇒ Some(x -> x)).filter(_ % 2 == 1)
+ Source.unfold(1)(x ⇒ Some(x → x)).filter(_ % 2 == 1)
.alsoTo(Flow[Int].fold(0)(_ + _).to(Sink.head.named("otherSink")).addAttributes(if (async) Attributes.asyncBoundary else Attributes.none))
.via(Flow[Int].fold(1)(_ + _).named("mainSink"))
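
The `Source.unfold` call reformatted above feeds this spec's fused/unfused comparison. `unfold(zero)(f)` repeatedly applies `f` to the state: `Some((nextState, element))` emits `element` and continues, `None` completes; the spec's own `Some(x → x)` keeps the state fixed, giving an infinite source. A minimal standalone sketch (demo names hypothetical, assuming the 2.4-era ActorMaterializer API seen elsewhere in this diff):

    import akka.actor.ActorSystem
    import akka.stream.ActorMaterializer
    import akka.stream.scaladsl.{ Sink, Source }

    object UnfoldDemo extends App {
      implicit val system = ActorSystem("unfold-demo")
      implicit val mat = ActorMaterializer()

      // State doubles each step; emits 1, 2, 4, 8, 16, then completes at None.
      Source.unfold(1)(x ⇒ if (x > 16) None else Some(x * 2 → x))
        .runWith(Sink.foreach(println))
        .onComplete(_ ⇒ system.terminate())(system.dispatcher)
    }
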
diff --git a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala
index d4a8f8d986..65ef8830f3 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala
@@ -369,21 +369,20 @@ class ActorPublisherSpec extends AkkaSpec(ActorPublisherSpec.config) with Implic
val sink1 = Sink.fromSubscriber(ActorSubscriber[String](system.actorOf(receiverProps(probe1.ref))))
val sink2: Sink[String, ActorRef] = Sink.actorSubscriber(receiverProps(probe2.ref))
- val senderRef2 = RunnableGraph.fromGraph(GraphDSL.create(Source.actorPublisher[Int](senderProps)) { implicit b ⇒
- source2 ⇒
- import GraphDSL.Implicits._
+ val senderRef2 = RunnableGraph.fromGraph(GraphDSL.create(Source.actorPublisher[Int](senderProps)) { implicit b ⇒ source2 ⇒
+ import GraphDSL.Implicits._
- val merge = b.add(Merge[Int](2))
- val bcast = b.add(Broadcast[String](2))
+ val merge = b.add(Merge[Int](2))
+ val bcast = b.add(Broadcast[String](2))
- source1 ~> merge.in(0)
- source2.out ~> merge.in(1)
+ source1 ~> merge.in(0)
+ source2.out ~> merge.in(1)
- merge.out.map(_.toString) ~> bcast.in
+ merge.out.map(_.toString) ~> bcast.in
- bcast.out(0).map(_ + "mark") ~> sink1
- bcast.out(1) ~> sink2
- ClosedShape
+ bcast.out(0).map(_ + "mark") ~> sink1
+ bcast.out(1) ~> sink2
+ ClosedShape
}).run()
(0 to 10).foreach {
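
Most hunks in this PR collapse the two-level GraphDSL block `{ implicit b ⇒` / `shape ⇒` onto one line. The block is a curried function: it takes the implicit `GraphDSL.Builder`, then the imported shape(s) of the graph(s) passed to `create`, and returns the final `Shape`. A self-contained sketch of the one-line form (demo names hypothetical):

    import akka.actor.ActorSystem
    import akka.stream.{ ActorMaterializer, ClosedShape }
    import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source }

    object CurriedBuilderDemo extends App {
      implicit val system = ActorSystem("curried-demo")
      implicit val mat = ActorMaterializer()

      // Builder first, imported SinkShape second — the same shape as the
      // `{ implicit b ⇒ source2 ⇒ ... }` blocks above.
      RunnableGraph.fromGraph(GraphDSL.create(Sink.foreach[Int](println)) { implicit b ⇒ sink ⇒
        import GraphDSL.Implicits._
        Source(1 to 3) ~> sink
        ClosedShape
      }).run()
    }
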
diff --git a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala
index 2df319d326..76578d24bf 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala
@@ -85,7 +85,7 @@ object ActorSubscriberSpec {
def receive = {
case OnNext(Msg(id, replyTo)) ⇒
- queue += (id -> replyTo)
+ queue += (id → replyTo)
assert(queue.size <= 10, s"queued too many: ${queue.size}")
router.route(Work(id), self)
case Reply(id) ⇒
diff --git a/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala
index e3d3d61041..51dbef0805 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala
@@ -37,7 +37,7 @@ class FlowTimedSpec extends AkkaSpec with ScriptedTest {
val n = 20
val testRuns = 1 to 2
- def script = Script((1 to n) map { x ⇒ Seq(x) -> Seq(x) }: _*)
+ def script = Script((1 to n) map { x ⇒ Seq(x) → Seq(x) }: _*)
testRuns foreach (_ ⇒ runScript(script, settings) { flow ⇒
flow.
map(identity).
@@ -59,7 +59,7 @@ class FlowTimedSpec extends AkkaSpec with ScriptedTest {
val testRuns = 1 to 3
- def script = Script((1 to n) map { x ⇒ Seq(x) -> Seq(x) }: _*)
+ def script = Script((1 to n) map { x ⇒ Seq(x) → Seq(x) }: _*)
testRuns foreach (_ ⇒ runScript(script, settings) { flow ⇒
flow.timed(_.map(identity), onComplete = printInfo)
})
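
`Script` and `runScript` come from akka.stream.testkit's ScriptedTest and recur in many specs touched below: a script is a sequence of (inputs, expected outputs) phases that the harness feeds through the operator under test. A REPL-style sketch of the data shape (the doubling stage is hypothetical):

    // One phase per element: feeding x must produce exactly x * 2.
    val phases: Seq[(Seq[Int], Seq[Int])] = (1 to 5).map(x ⇒ Seq(x) → Seq(x * 2))
    // Script(phases: _*) would then drive runScript(script, settings)(_.map(_ * 2)).
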
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala
index fc5f4bde0f..63b25ec15b 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala
@@ -243,8 +243,8 @@ class StreamLayoutSpec extends AkkaSpec {
materializer.subscribers.size should be(materializer.publishers.size)
- val inToSubscriber: Map[InPort, TestSubscriber] = materializer.subscribers.map(s ⇒ s.port -> s).toMap
- val outToPublisher: Map[OutPort, TestPublisher] = materializer.publishers.map(s ⇒ s.port -> s).toMap
+ val inToSubscriber: Map[InPort, TestSubscriber] = materializer.subscribers.map(s ⇒ s.port → s).toMap
+ val outToPublisher: Map[OutPort, TestPublisher] = materializer.publishers.map(s ⇒ s.port → s).toMap
for (publisher ← materializer.publishers) {
publisher.owner.isAtomic should be(true)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala
index 55035b1b80..7a48799576 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala
@@ -220,17 +220,16 @@ class ActorGraphInterpreterSpec extends AkkaSpec {
val takeAll = Flow[Int].grouped(200).toMat(Sink.head)(Keep.right)
- val (f1, f2) = RunnableGraph.fromGraph(GraphDSL.create(takeAll, takeAll)(Keep.both) { implicit b ⇒
- (out1, out2) ⇒
- import GraphDSL.Implicits._
- val bidi = b.add(rotatedBidi)
+ val (f1, f2) = RunnableGraph.fromGraph(GraphDSL.create(takeAll, takeAll)(Keep.both) { implicit b ⇒ (out1, out2) ⇒
+ import GraphDSL.Implicits._
+ val bidi = b.add(rotatedBidi)
- Source(1 to 10) ~> bidi.in1
- out2 <~ bidi.out2
+ Source(1 to 10) ~> bidi.in1
+ out2 <~ bidi.out2
- bidi.in2 <~ Source(1 to 100)
- bidi.out1 ~> out1
- ClosedShape
+ bidi.in2 <~ Source(1 to 100)
+ bidi.out1 ~> out1
+ ClosedShape
}).run()
Await.result(f1, 3.seconds) should ===(1 to 100)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala
index 09118ebe4c..0cae67e5bc 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala
@@ -39,17 +39,17 @@ trait GraphInterpreterSpecKit extends AkkaSpec {
var connections = Vector.empty[(Outlet[_], Inlet[_])]
def connect[T](upstream: UpstreamBoundaryStageLogic[T], in: Inlet[T]): AssemblyBuilder = {
- upstreams :+= upstream -> in
+ upstreams :+= upstream → in
this
}
def connect[T](out: Outlet[T], downstream: DownstreamBoundaryStageLogic[T]): AssemblyBuilder = {
- downstreams :+= out -> downstream
+ downstreams :+= out → downstream
this
}
def connect[T](out: Outlet[T], in: Inlet[T]): AssemblyBuilder = {
- connections :+= out -> in
+ connections :+= out → in
this
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala
index 8960456239..8fed034313 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala
@@ -143,10 +143,10 @@ class LifecycleInterpreterSpec extends AkkaSpec with GraphInterpreterSpecKit {
}
private[akka] case class PreStartAndPostStopIdentity[T](
- onStart: () ⇒ Unit = () ⇒ (),
- onStop: () ⇒ Unit = () ⇒ (),
- onUpstreamCompleted: () ⇒ Unit = () ⇒ (),
- onUpstreamFailed: Throwable ⇒ Unit = ex ⇒ ()) extends SimpleLinearGraphStage[T] {
+ onStart: () ⇒ Unit = () ⇒ (),
+ onStop: () ⇒ Unit = () ⇒ (),
+ onUpstreamCompleted: () ⇒ Unit = () ⇒ (),
+ onUpstreamFailed: Throwable ⇒ Unit = ex ⇒ ()) extends SimpleLinearGraphStage[T] {
override def createLogic(attributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) with InHandler with OutHandler {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala
index 9e507ff8c2..16dd3c4350 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala
@@ -555,10 +555,11 @@ class TcpSpec extends AkkaSpec("akka.stream.materializer.subscription-timeout.ti
}
}
- def validateServerClientCommunication(testData: ByteString,
- serverConnection: ServerConnection,
- readProbe: TcpReadProbe,
- writeProbe: TcpWriteProbe): Unit = {
+ def validateServerClientCommunication(
+ testData: ByteString,
+ serverConnection: ServerConnection,
+ readProbe: TcpReadProbe,
+ writeProbe: TcpWriteProbe): Unit = {
serverConnection.write(testData)
serverConnection.read(5)
readProbe.read(5) should be(testData)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala
index 1be60ea4ca..2aa6617849 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala
@@ -405,12 +405,11 @@ class TlsSpec extends AkkaSpec("akka.loglevel=INFO\nakka.actor.debug.receive=off
"reliably cancel subscriptions when TransportIn fails early" in assertAllStagesStopped {
val ex = new Exception("hello")
val (sub, out1, out2) =
- RunnableGraph.fromGraph(GraphDSL.create(Source.asSubscriber[SslTlsOutbound], Sink.head[ByteString], Sink.head[SslTlsInbound])((_, _, _)) { implicit b ⇒
- (s, o1, o2) ⇒
- val tls = b.add(clientTls(EagerClose))
- s ~> tls.in1; tls.out1 ~> o1
- o2 <~ tls.out2; tls.in2 <~ Source.failed(ex)
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(Source.asSubscriber[SslTlsOutbound], Sink.head[ByteString], Sink.head[SslTlsInbound])((_, _, _)) { implicit b ⇒ (s, o1, o2) ⇒
+ val tls = b.add(clientTls(EagerClose))
+ s ~> tls.in1; tls.out1 ~> o1
+ o2 <~ tls.out2; tls.in2 <~ Source.failed(ex)
+ ClosedShape
}).run()
the[Exception] thrownBy Await.result(out1, 1.second) should be(ex)
the[Exception] thrownBy Await.result(out2, 1.second) should be(ex)
@@ -423,12 +422,11 @@ class TlsSpec extends AkkaSpec("akka.loglevel=INFO\nakka.actor.debug.receive=off
"reliably cancel subscriptions when UserIn fails early" in assertAllStagesStopped {
val ex = new Exception("hello")
val (sub, out1, out2) =
- RunnableGraph.fromGraph(GraphDSL.create(Source.asSubscriber[ByteString], Sink.head[ByteString], Sink.head[SslTlsInbound])((_, _, _)) { implicit b ⇒
- (s, o1, o2) ⇒
- val tls = b.add(clientTls(EagerClose))
- Source.failed[SslTlsOutbound](ex) ~> tls.in1; tls.out1 ~> o1
- o2 <~ tls.out2; tls.in2 <~ s
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(Source.asSubscriber[ByteString], Sink.head[ByteString], Sink.head[SslTlsInbound])((_, _, _)) { implicit b ⇒ (s, o1, o2) ⇒
+ val tls = b.add(clientTls(EagerClose))
+ Source.failed[SslTlsOutbound](ex) ~> tls.in1; tls.out1 ~> o1
+ o2 <~ tls.out2; tls.in2 <~ s
+ ClosedShape
}).run()
the[Exception] thrownBy Await.result(out1, 1.second) should be(ex)
the[Exception] thrownBy Await.result(out2, 1.second) should be(ex)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
index 2f52804bc1..eb94cc6f2d 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
@@ -59,7 +59,8 @@ class ActorRefBackpressureSinkSpec extends AkkaSpec {
"send the elements to the ActorRef" in assertAllStagesStopped {
val fw = createActor(classOf[Fw])
- Source(List(1, 2, 3)).runWith(Sink.actorRefWithAck(fw,
+ Source(List(1, 2, 3)).runWith(Sink.actorRefWithAck(
+ fw,
initMessage, ackMessage, completeMessage))
expectMsg("start")
expectMsg(1)
@@ -70,7 +71,8 @@ class ActorRefBackpressureSinkSpec extends AkkaSpec {
"send the elements to the ActorRef2" in assertAllStagesStopped {
val fw = createActor(classOf[Fw])
- val probe = TestSource.probe[Int].to(Sink.actorRefWithAck(fw,
+ val probe = TestSource.probe[Int].to(Sink.actorRefWithAck(
+ fw,
initMessage, ackMessage, completeMessage)).run()
probe.sendNext(1)
expectMsg("start")
@@ -85,7 +87,8 @@ class ActorRefBackpressureSinkSpec extends AkkaSpec {
"cancel stream when actor terminates" in assertAllStagesStopped {
val fw = createActor(classOf[Fw])
- val publisher = TestSource.probe[Int].to(Sink.actorRefWithAck(fw,
+ val publisher = TestSource.probe[Int].to(Sink.actorRefWithAck(
+ fw,
initMessage, ackMessage, completeMessage)).run().sendNext(1)
expectMsg(initMessage)
expectMsg(1)
@@ -95,7 +98,8 @@ class ActorRefBackpressureSinkSpec extends AkkaSpec {
"send message only when backpressure received" in assertAllStagesStopped {
val fw = createActor(classOf[Fw2])
- val publisher = TestSource.probe[Int].to(Sink.actorRefWithAck(fw,
+ val publisher = TestSource.probe[Int].to(Sink.actorRefWithAck(
+ fw,
initMessage, ackMessage, completeMessage)).run()
expectMsg(initMessage)
@@ -138,7 +142,8 @@ class ActorRefBackpressureSinkSpec extends AkkaSpec {
"work with one element buffer" in assertAllStagesStopped {
val fw = createActor(classOf[Fw2])
val publisher =
- TestSource.probe[Int].to(Sink.actorRefWithAck(fw,
+ TestSource.probe[Int].to(Sink.actorRefWithAck(
+ fw,
initMessage, ackMessage, completeMessage)
.withAttributes(inputBuffer(1, 1))).run()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala
index 8caecb9065..85b8ff4212 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala
@@ -27,13 +27,12 @@ class BidiFlowSpec extends AkkaSpec {
Flow[Long].map(x ⇒ x.toInt + 2).withAttributes(name("top")),
Flow[String].map(ByteString(_)).withAttributes(name("bottom")))
- val bidiMat = BidiFlow.fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b ⇒
- s ⇒
- Source.single(42) ~> s
+ val bidiMat = BidiFlow.fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b ⇒ s ⇒
+ Source.single(42) ~> s
- val top = b.add(Flow[Int].map(x ⇒ x.toLong + 2))
- val bottom = b.add(Flow[ByteString].map(_.decodeString("UTF-8")))
- BidiShape(top.in, top.out, bottom.in, bottom.out)
+ val top = b.add(Flow[Int].map(x ⇒ x.toLong + 2))
+ val bottom = b.add(Flow[ByteString].map(_.decodeString("UTF-8")))
+ BidiShape(top.in, top.out, bottom.in, bottom.out)
})
val str = "Hello World"
@@ -42,13 +41,12 @@ class BidiFlowSpec extends AkkaSpec {
"A BidiFlow" must {
"work top/bottom in isolation" in {
- val (top, bottom) = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Long], Sink.head[String])(Keep.both) { implicit b ⇒
- (st, sb) ⇒
- val s = b.add(bidi)
+ val (top, bottom) = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Long], Sink.head[String])(Keep.both) { implicit b ⇒ (st, sb) ⇒
+ val s = b.add(bidi)
- Source.single(1) ~> s.in1; s.out1 ~> st
- sb <~ s.out2; s.in2 <~ Source.single(bytes)
- ClosedShape
+ Source.single(1) ~> s.in1; s.out1 ~> st
+ sb <~ s.out2; s.in2 <~ Source.single(bytes)
+ ClosedShape
}).run()
Await.result(top, 1.second) should ===(3)
@@ -81,30 +79,27 @@ class BidiFlowSpec extends AkkaSpec {
}
"materialize to its value" in {
- val f = RunnableGraph.fromGraph(GraphDSL.create(bidiMat) { implicit b ⇒
- bidi ⇒
- Flow[String].map(Integer.valueOf(_).toInt) <~> bidi <~> Flow[Long].map(x ⇒ ByteString(s"Hello $x"))
- ClosedShape
+ val f = RunnableGraph.fromGraph(GraphDSL.create(bidiMat) { implicit b ⇒ bidi ⇒
+ Flow[String].map(Integer.valueOf(_).toInt) <~> bidi <~> Flow[Long].map(x ⇒ ByteString(s"Hello $x"))
+ ClosedShape
}).run()
Await.result(f, 1.second) should ===(42)
}
"combine materialization values" in assertAllStagesStopped {
- val left = Flow.fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b ⇒
- sink ⇒
- val bcast = b.add(Broadcast[Int](2))
- val merge = b.add(Merge[Int](2))
- val flow = b.add(Flow[String].map(Integer.valueOf(_).toInt))
- bcast ~> sink
- Source.single(1) ~> bcast ~> merge
- flow ~> merge
- FlowShape(flow.in, merge.out)
+ val left = Flow.fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b ⇒ sink ⇒
+ val bcast = b.add(Broadcast[Int](2))
+ val merge = b.add(Merge[Int](2))
+ val flow = b.add(Flow[String].map(Integer.valueOf(_).toInt))
+ bcast ~> sink
+ Source.single(1) ~> bcast ~> merge
+ flow ~> merge
+ FlowShape(flow.in, merge.out)
})
- val right = Flow.fromGraph(GraphDSL.create(Sink.head[immutable.Seq[Long]]) { implicit b ⇒
- sink ⇒
- val flow = b.add(Flow[Long].grouped(10))
- flow ~> sink
- FlowShape(flow.in, b.add(Source.single(ByteString("10"))).out)
+ val right = Flow.fromGraph(GraphDSL.create(Sink.head[immutable.Seq[Long]]) { implicit b ⇒ sink ⇒
+ val flow = b.add(Flow[Long].grouped(10))
+ flow ~> sink
+ FlowShape(flow.in, b.add(Source.single(ByteString("10"))).out)
})
val ((l, m), r) = left.joinMat(bidiMat)(Keep.both).joinMat(right)(Keep.both).run()
Await.result(l, 1.second) should ===(1)
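
For orientation in the hunks above: `BidiFlow.fromFlows`, used to build `bidi` at the top of this spec, wraps two independent flows into one four-port stage — `in1`/`out1` across the top, `in2`/`out2` back across the bottom — which `<~>` can then join against ordinary flows. A compile-only sketch:

    import akka.stream.scaladsl.{ BidiFlow, Flow }
    import akka.util.ByteString

    // Top: encode Int ⇒ ByteString; bottom: decode ByteString ⇒ String.
    val codec = BidiFlow.fromFlows(
      Flow[Int].map(x ⇒ ByteString(x.toString)),
      Flow[ByteString].map(_.decodeString("UTF-8")))
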
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala
index 717d23391d..f617e6f491 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala
@@ -27,7 +27,7 @@ class FlowCollectSpec extends AkkaSpec with ScriptedTest {
"collect" in {
def script = Script(TestConfig.RandomTestRange map { _ ⇒
val x = random.nextInt(0, 10000)
- Seq(x) -> (if ((x & 1) == 0) Seq((x * x).toString) else Seq.empty[String])
+ Seq(x) → (if ((x & 1) == 0) Seq((x * x).toString) else Seq.empty[String])
}: _*)
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.collect { case x if x % 2 == 0 ⇒ (x * x).toString }))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala
index f75317638b..edbe147696 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala
@@ -19,7 +19,7 @@ class FlowDropSpec extends AkkaSpec with ScriptedTest {
"A Drop" must {
"drop" in {
- def script(d: Int) = Script(TestConfig.RandomTestRange map { n ⇒ Seq(n) -> (if (n <= d) Nil else Seq(n)) }: _*)
+ def script(d: Int) = Script(TestConfig.RandomTestRange map { n ⇒ Seq(n) → (if (n <= d) Nil else Seq(n)) }: _*)
TestConfig.RandomTestRange foreach { _ ⇒
val d = Math.min(Math.max(random.nextInt(-10, 60), 0), 50)
runScript(script(d), settings)(_.drop(d))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala
index 10afb841fa..b35a89a758 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala
@@ -130,20 +130,20 @@ class FlowExpandSpec extends AkkaSpec {
"work properly with finite extrapolations" in {
val (source, sink) =
TestSource.probe[Int]
- .expand(i ⇒ Iterator.from(0).map(i -> _).take(3))
+ .expand(i ⇒ Iterator.from(0).map(i → _).take(3))
.toMat(TestSink.probe)(Keep.both)
.run()
source
.sendNext(1)
sink
.request(4)
- .expectNext(1 -> 0, 1 -> 1, 1 -> 2)
+ .expectNext(1 → 0, 1 → 1, 1 → 2)
.expectNoMsg(100.millis)
source
.sendNext(2)
.sendComplete()
sink
- .expectNext(2 -> 0)
+ .expectNext(2 → 0)
.expectComplete()
}
}
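
The `expand` operator reformatted above takes an `Out ⇒ Iterator[U]` extrapolation: each upstream element becomes an iterator that downstream may drain while upstream is idle. With `.take(3)` the extrapolation is finite, which is what the "finite extrapolations" case verifies. A compile-only sketch:

    import akka.stream.scaladsl.Source

    val expanded = Source(List(1, 2))
      .expand(i ⇒ Iterator.from(0).map(i → _).take(3))
    // At most three (element, index) pairs per input reach downstream; how many
    // are actually pulled depends on relative demand timing, hence the probes above.
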
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala
index 486ed1b6b4..cc089c0e30 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala
@@ -26,7 +26,7 @@ class FlowFilterSpec extends AkkaSpec with ScriptedTest {
"A Filter" must {
"filter" in {
- def script = Script(TestConfig.RandomTestRange map { _ ⇒ val x = random.nextInt(); Seq(x) -> (if ((x & 1) == 0) Seq(x) else Seq()) }: _*)
+ def script = Script(TestConfig.RandomTestRange map { _ ⇒ val x = random.nextInt(); Seq(x) → (if ((x & 1) == 0) Seq(x) else Seq()) }: _*)
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.filter(_ % 2 == 0)))
}
@@ -66,7 +66,7 @@ class FlowFilterSpec extends AkkaSpec with ScriptedTest {
def script = Script(TestConfig.RandomTestRange map
{ _ ⇒
val x = random.nextInt()
- Seq(x) -> (if ((x & 1) == 1) Seq(x) else Seq())
+ Seq(x) → (if ((x & 1) == 1) Seq(x) else Seq())
}: _*)
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.filterNot(_ % 2 == 0)))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala
index 7e0885e5c8..7cc2d17cde 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala
@@ -31,7 +31,7 @@ import scala.concurrent.forkjoin.ThreadLocalRandom
object FlowGroupBySpec {
implicit class Lift[M](val f: SubFlow[Int, M, Source[Int, M]#Repr, RunnableGraph[M]]) extends AnyVal {
- def lift(key: Int ⇒ Int) = f.prefixAndTail(1).map(p ⇒ key(p._1.head) -> (Source.single(p._1.head) ++ p._2)).concatSubstreams
+ def lift(key: Int ⇒ Int) = f.prefixAndTail(1).map(p ⇒ key(p._1.head) → (Source.single(p._1.head) ++ p._2)).concatSubstreams
}
}
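
The `lift` helper above uses the standard trick for testing `groupBy`: `prefixAndTail(1)` turns each substream into its first element plus the remainder, so a substream can be tagged with its key and flattened back deterministically. A compile-only sketch of the same idea on a plain source (values hypothetical):

    import akka.stream.scaladsl.Source

    val tagged = Source(1 to 6)
      .groupBy(2, _ % 2)
      .prefixAndTail(1)
      .map { case (prefix, tail) ⇒ (prefix.head % 2) → (Source(prefix) ++ tail) }
      .concatSubstreams
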
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala
index fa026efba4..5407af1732 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala
@@ -18,7 +18,7 @@ class FlowGroupedSpec extends AkkaSpec with ScriptedTest {
"A Grouped" must {
def randomSeq(n: Int) = immutable.Seq.fill(n)(random.nextInt())
- def randomTest(n: Int) = { val s = randomSeq(n); s -> immutable.Seq(s) }
+ def randomTest(n: Int) = { val s = randomSeq(n); s → immutable.Seq(s) }
"group evenly" in {
val testLen = random.nextInt(1, 16)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala
index b77757fd72..6755377f67 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala
@@ -126,13 +126,13 @@ class FlowGroupedWithinSpec extends AkkaSpec with ScriptedTest {
}
"group evenly" in {
- def script = Script(TestConfig.RandomTestRange map { _ ⇒ val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) }: _*)
+ def script = Script(TestConfig.RandomTestRange map { _ ⇒ val x, y, z = random.nextInt(); Seq(x, y, z) → Seq(immutable.Seq(x, y, z)) }: _*)
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.groupedWithin(3, 10.minutes)))
}
"group with rest" in {
- def script = Script((TestConfig.RandomTestRange.map { _ ⇒ val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) }
- :+ { val x = random.nextInt(); Seq(x) -> Seq(immutable.Seq(x)) }): _*)
+ def script = Script((TestConfig.RandomTestRange.map { _ ⇒ val x, y, z = random.nextInt(); Seq(x, y, z) → Seq(immutable.Seq(x, y, z)) }
+ :+ { val x = random.nextInt(); Seq(x) → Seq(immutable.Seq(x)) }): _*)
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.groupedWithin(3, 10.minutes)))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala
index 27f0519f5f..6c98f623b1 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala
@@ -60,16 +60,15 @@ class FlowJoinSpec extends AkkaSpec(ConfigFactory.parseString("akka.loglevel=INF
"allow for merge cycle" in assertAllStagesStopped {
val source = Source.single("lonely traveler")
- val flow1 = Flow.fromGraph(GraphDSL.create(Sink.head[String]) { implicit b ⇒
- sink ⇒
- import GraphDSL.Implicits._
- val merge = b.add(Merge[String](2))
- val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
- source ~> merge.in(0)
- merge.out ~> broadcast.in
- broadcast.out(0) ~> sink
+ val flow1 = Flow.fromGraph(GraphDSL.create(Sink.head[String]) { implicit b ⇒ sink ⇒
+ import GraphDSL.Implicits._
+ val merge = b.add(Merge[String](2))
+ val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
+ source ~> merge.in(0)
+ merge.out ~> broadcast.in
+ broadcast.out(0) ~> sink
- FlowShape(merge.in(1), broadcast.out(1))
+ FlowShape(merge.in(1), broadcast.out(1))
})
whenReady(flow1.join(Flow[String]).run())(_ shouldBe "lonely traveler")
@@ -78,16 +77,15 @@ class FlowJoinSpec extends AkkaSpec(ConfigFactory.parseString("akka.loglevel=INF
"allow for merge preferred cycle" in assertAllStagesStopped {
val source = Source.single("lonely traveler")
- val flow1 = Flow.fromGraph(GraphDSL.create(Sink.head[String]) { implicit b ⇒
- sink ⇒
- import GraphDSL.Implicits._
- val merge = b.add(MergePreferred[String](1))
- val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
- source ~> merge.preferred
- merge.out ~> broadcast.in
- broadcast.out(0) ~> sink
+ val flow1 = Flow.fromGraph(GraphDSL.create(Sink.head[String]) { implicit b ⇒ sink ⇒
+ import GraphDSL.Implicits._
+ val merge = b.add(MergePreferred[String](1))
+ val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
+ source ~> merge.preferred
+ merge.out ~> broadcast.in
+ broadcast.out(0) ~> sink
- FlowShape(merge.in(0), broadcast.out(1))
+ FlowShape(merge.in(0), broadcast.out(1))
})
whenReady(flow1.join(Flow[String]).run())(_ shouldBe "lonely traveler")
@@ -96,28 +94,26 @@ class FlowJoinSpec extends AkkaSpec(ConfigFactory.parseString("akka.loglevel=INF
"allow for zip cycle" in assertAllStagesStopped {
val source = Source(immutable.Seq("traveler1", "traveler2"))
- val flow = Flow.fromGraph(GraphDSL.create(TestSink.probe[(String, String)]) { implicit b ⇒
- sink ⇒
- import GraphDSL.Implicits._
- val zip = b.add(Zip[String, String])
- val broadcast = b.add(Broadcast[(String, String)](2))
- source ~> zip.in0
- zip.out ~> broadcast.in
- broadcast.out(0) ~> sink
+ val flow = Flow.fromGraph(GraphDSL.create(TestSink.probe[(String, String)]) { implicit b ⇒ sink ⇒
+ import GraphDSL.Implicits._
+ val zip = b.add(Zip[String, String])
+ val broadcast = b.add(Broadcast[(String, String)](2))
+ source ~> zip.in0
+ zip.out ~> broadcast.in
+ broadcast.out(0) ~> sink
- FlowShape(zip.in1, broadcast.out(1))
+ FlowShape(zip.in1, broadcast.out(1))
})
- val feedback = Flow.fromGraph(GraphDSL.create(Source.single("ignition")) { implicit b ⇒
- ignition ⇒
- import GraphDSL.Implicits._
- val flow = b.add(Flow[(String, String)].map(_._1))
- val merge = b.add(Merge[String](2))
+ val feedback = Flow.fromGraph(GraphDSL.create(Source.single("ignition")) { implicit b ⇒ ignition ⇒
+ import GraphDSL.Implicits._
+ val flow = b.add(Flow[(String, String)].map(_._1))
+ val merge = b.add(Merge[String](2))
- ignition ~> merge.in(0)
- flow ~> merge.in(1)
+ ignition ~> merge.in(0)
+ flow ~> merge.in(1)
- FlowShape(flow.in, merge.out)
+ FlowShape(flow.in, merge.out)
})
val probe = flow.join(feedback).run()
@@ -126,16 +122,15 @@ class FlowJoinSpec extends AkkaSpec(ConfigFactory.parseString("akka.loglevel=INF
}
"allow for concat cycle" in assertAllStagesStopped {
- val flow = Flow.fromGraph(GraphDSL.create(TestSource.probe[String](system), Sink.head[String])(Keep.both) { implicit b ⇒
- (source, sink) ⇒
- import GraphDSL.Implicits._
- val concat = b.add(Concat[String](2))
- val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
- source ~> concat.in(0)
- concat.out ~> broadcast.in
- broadcast.out(0) ~> sink
+ val flow = Flow.fromGraph(GraphDSL.create(TestSource.probe[String](system), Sink.head[String])(Keep.both) { implicit b ⇒ (source, sink) ⇒
+ import GraphDSL.Implicits._
+ val concat = b.add(Concat[String](2))
+ val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
+ source ~> concat.in(0)
+ concat.out ~> broadcast.in
+ broadcast.out(0) ~> sink
- FlowShape(concat.in(1), broadcast.out(1))
+ FlowShape(concat.in(1), broadcast.out(1))
})
val (probe, result) = flow.join(Flow[String]).run()
@@ -149,16 +144,15 @@ class FlowJoinSpec extends AkkaSpec(ConfigFactory.parseString("akka.loglevel=INF
"allow for interleave cycle" in assertAllStagesStopped {
val source = Source.single("lonely traveler")
- val flow1 = Flow.fromGraph(GraphDSL.create(Sink.head[String]) { implicit b ⇒
- sink ⇒
- import GraphDSL.Implicits._
- val merge = b.add(Interleave[String](2, 1))
- val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
- source ~> merge.in(0)
- merge.out ~> broadcast.in
- broadcast.out(0) ~> sink
+ val flow1 = Flow.fromGraph(GraphDSL.create(Sink.head[String]) { implicit b ⇒ sink ⇒
+ import GraphDSL.Implicits._
+ val merge = b.add(Interleave[String](2, 1))
+ val broadcast = b.add(Broadcast[String](2, eagerCancel = true))
+ source ~> merge.in(0)
+ merge.out ~> broadcast.in
+ broadcast.out(0) ~> sink
- FlowShape(merge.in(1), broadcast.out(1))
+ FlowShape(merge.in(1), broadcast.out(1))
})
whenReady(flow1.join(Flow[String]).run())(_ shouldBe "lonely traveler")
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala
index f32ac774e1..8d00c85623 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala
@@ -272,15 +272,14 @@ class FlowKillSwitchSpec extends AkkaSpec {
val switch1 = KillSwitches.shared("switch")
val switch2 = KillSwitches.shared("switch")
- val downstream = RunnableGraph.fromGraph(GraphDSL.create(TestSink.probe[Int]) { implicit b ⇒
- snk ⇒
- import GraphDSL.Implicits._
- val merge = b.add(Merge[Int](2))
+ val downstream = RunnableGraph.fromGraph(GraphDSL.create(TestSink.probe[Int]) { implicit b ⇒ snk ⇒
+ import GraphDSL.Implicits._
+ val merge = b.add(Merge[Int](2))
- Source.maybe[Int].via(switch1.flow) ~> merge ~> snk
- Source.maybe[Int].via(switch2.flow) ~> merge
+ Source.maybe[Int].via(switch1.flow) ~> merge ~> snk
+ Source.maybe[Int].via(switch2.flow) ~> merge
- ClosedShape
+ ClosedShape
}).run()
downstream.ensureSubscription()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
index 37b77b3f48..40a5fdd911 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala
@@ -237,7 +237,7 @@ class FlowMapAsyncSpec extends AkkaSpec {
if (counter.incrementAndGet() > parallelism) Future.failed(new Exception("parallelism exceeded"))
else {
val p = Promise[Int]
- queue.offer(p -> System.nanoTime())
+ queue.offer(p → System.nanoTime())
p.future
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
index f772acdd96..ef741d59bd 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala
@@ -33,7 +33,7 @@ class FlowMapAsyncUnorderedSpec extends AkkaSpec {
"produce future elements in the order they are ready" in assertAllStagesStopped {
val c = TestSubscriber.manualProbe[Int]()
implicit val ec = system.dispatcher
- val latch = (1 to 4).map(_ -> TestLatch(1)).toMap
+ val latch = (1 to 4).map(_ → TestLatch(1)).toMap
val p = Source(1 to 4).mapAsyncUnordered(4)(n ⇒ Future {
Await.ready(latch(n), 5.seconds)
n
@@ -229,7 +229,7 @@ class FlowMapAsyncUnorderedSpec extends AkkaSpec {
if (counter.incrementAndGet() > parallelism) Future.failed(new Exception("parallelism exceeded"))
else {
val p = Promise[Int]
- queue.offer(p -> System.nanoTime())
+ queue.offer(p → System.nanoTime())
p.future
}
}
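
Both mapAsync specs touched above stress the same contract: at most `parallelism` futures in flight, and for the unordered variant results are emitted as soon as each future completes rather than in upstream order. A compile-only sketch:

    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.Future
    import akka.stream.scaladsl.Source

    // Later elements sleep less, so they overtake earlier ones downstream;
    // plain mapAsync(4) would buffer to preserve the upstream order instead.
    val unordered = Source(1 to 10).mapAsyncUnordered(4) { n ⇒
      Future { Thread.sleep((10 - n) * 10L); n }
    }
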
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
index d3eb54fb0c..5a7523a0c2 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala
@@ -20,12 +20,12 @@ class FlowMapConcatSpec extends AkkaSpec with ScriptedTest {
"map and concat" in {
val script = Script(
- Seq(0) -> Seq(),
- Seq(1) -> Seq(1),
- Seq(2) -> Seq(2, 2),
- Seq(3) -> Seq(3, 3, 3),
- Seq(2) -> Seq(2, 2),
- Seq(1) -> Seq(1))
+ Seq(0) → Seq(),
+ Seq(1) → Seq(1),
+ Seq(2) → Seq(2, 2),
+ Seq(3) → Seq(3, 3, 3),
+ Seq(2) → Seq(2, 2),
+ Seq(1) → Seq(1))
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.mapConcat(x ⇒ (1 to x) map (_ ⇒ x))))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala
index 3a585f2686..d5be156182 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala
@@ -19,7 +19,7 @@ class FlowMapSpec extends AkkaSpec with ScriptedTest {
"A Map" must {
"map" in {
- def script = Script(TestConfig.RandomTestRange map { _ ⇒ val x = random.nextInt(); Seq(x) -> Seq(x.toString) }: _*)
+ def script = Script(TestConfig.RandomTestRange map { _ ⇒ val x = random.nextInt(); Seq(x) → Seq(x.toString) }: _*)
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.map(_.toString)))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala
index c1803aebe8..8e139ba23c 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala
@@ -96,7 +96,7 @@ class FlowSpec extends AkkaSpec(ConfigFactory.parseString("akka.actor.debug.rece
"A Flow" must {
- for ((name, op) ← List("identity" -> identity, "identity2" -> identity2); n ← List(1, 2, 4)) {
+ for ((name, op) ← List("identity" → identity, "identity2" → identity2); n ← List(1, 2, 4)) {
s"request initial elements from upstream ($name, $n)" in {
new ChainSetup(op, settings.withInputBuffer(initialSize = n, maxSize = n), toPublisher) {
upstream.expectRequest(upstreamSubscription, settings.maxInputBufferSize)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala
index 854942bc31..94b3d34331 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala
@@ -47,8 +47,8 @@ class FlowSplitAfterSpec extends AkkaSpec {
}
class SubstreamsSupport(
- splitAfter: Int = 3,
- elementCount: Int = 6,
+ splitAfter: Int = 3,
+ elementCount: Int = 6,
substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) {
val source = Source(1 to elementCount)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala
index 92f63ee570..c1aa286526 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala
@@ -37,8 +37,8 @@ class FlowSplitWhenSpec extends AkkaSpec {
}
class SubstreamsSupport(
- splitWhen: Int = 3,
- elementCount: Int = 6,
+ splitWhen: Int = 3,
+ elementCount: Int = 6,
substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) {
val source = Source(1 to elementCount)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala
index 922f6f9db7..76d24bf192 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala
@@ -20,10 +20,10 @@ class FlowStatefulMapConcatSpec extends AkkaSpec with ScriptedTest {
"work in happy case" in {
val script = Script(
- Seq(2) -> Seq(),
- Seq(1) -> Seq(1, 1),
- Seq(3) -> Seq(3),
- Seq(6) -> Seq(6, 6, 6))
+ Seq(2) → Seq(),
+ Seq(1) → Seq(1, 1),
+ Seq(3) → Seq(3),
+ Seq(6) → Seq(6, 6, 6))
TestConfig.RandomTestRange foreach (_ ⇒ runScript(script, settings)(_.statefulMapConcat(() ⇒ {
var prev: Option[Int] = None
x ⇒ prev match {
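
The script in this hunk pins down `statefulMapConcat`: the `() ⇒ ...` factory runs once per materialization and may close over mutable state; each element then maps to a (possibly empty) immutable sequence. A compile-only sketch reproducing the spec's expectations — each element is repeated as many times as its predecessor's value:

    import akka.stream.scaladsl.Flow

    val repeatByPredecessor = Flow[Int].statefulMapConcat { () ⇒
      var prev: Option[Int] = None
      x ⇒ {
        val out = prev.fold(List.empty[Int])(p ⇒ List.fill(p)(x))
        prev = Some(x)
        out
      }
    }
    // Feeding 2, 1, 3, 6 yields (), (1, 1), (3), (6, 6, 6) — the script above.
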
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala
index 133ef846e2..380c291e2f 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala
@@ -26,7 +26,7 @@ class FlowTakeSpec extends AkkaSpec with ScriptedTest {
"A Take" must {
"take" in {
- def script(d: Int) = Script(TestConfig.RandomTestRange map { n ⇒ Seq(n) -> (if (n > d) Nil else Seq(n)) }: _*)
+ def script(d: Int) = Script(TestConfig.RandomTestRange map { n ⇒ Seq(n) → (if (n > d) Nil else Seq(n)) }: _*)
TestConfig.RandomTestRange foreach { _ ⇒
val d = Math.min(Math.max(random.nextInt(-10, 60), 0), 50)
runScript(script(d), settings)(_.take(d))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala
index ee3e41a9a0..67bb7f1f1f 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala
@@ -63,10 +63,9 @@ class GraphFlowSpec extends AkkaSpec {
"work with a Source and Sink" in {
val probe = TestSubscriber.manualProbe[Int]()
- val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- import GraphDSL.Implicits._
- FlowShape(partial.in, partial.out.map(_.toInt).outlet)
+ val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ import GraphDSL.Implicits._
+ FlowShape(partial.in, partial.out.map(_.toInt).outlet)
})
source1.via(flow).to(Sink.fromSubscriber(probe)).run()
@@ -77,8 +76,7 @@ class GraphFlowSpec extends AkkaSpec {
"be transformable with a Pipe" in {
val probe = TestSubscriber.manualProbe[Int]()
- val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒ FlowShape(partial.in, partial.out)
+ val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒ FlowShape(partial.in, partial.out)
})
source1.via(flow).map(_.toInt).to(Sink.fromSubscriber(probe)).run()
@@ -89,14 +87,12 @@ class GraphFlowSpec extends AkkaSpec {
"work with another GraphFlow" in {
val probe = TestSubscriber.manualProbe[Int]()
- val flow1 = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- FlowShape(partial.in, partial.out)
+ val flow1 = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ FlowShape(partial.in, partial.out)
})
- val flow2 = Flow.fromGraph(GraphDSL.create(Flow[String].map(_.toInt)) { implicit b ⇒
- importFlow ⇒
- FlowShape(importFlow.in, importFlow.out)
+ val flow2 = Flow.fromGraph(GraphDSL.create(Flow[String].map(_.toInt)) { implicit b ⇒ importFlow ⇒
+ FlowShape(importFlow.in, importFlow.out)
})
source1.via(flow1).via(flow2).to(Sink.fromSubscriber(probe)).run()
@@ -107,8 +103,7 @@ class GraphFlowSpec extends AkkaSpec {
"be reusable multiple times" in {
val probe = TestSubscriber.manualProbe[Int]()
- val flow = Flow.fromGraph(GraphDSL.create(Flow[Int].map(_ * 2)) { implicit b ⇒
- importFlow ⇒ FlowShape(importFlow.in, importFlow.out)
+ val flow = Flow.fromGraph(GraphDSL.create(Flow[Int].map(_ * 2)) { implicit b ⇒ importFlow ⇒ FlowShape(importFlow.in, importFlow.out)
})
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
@@ -125,11 +120,10 @@ class GraphFlowSpec extends AkkaSpec {
"work with a Sink" in {
val probe = TestSubscriber.manualProbe[Int]()
- val source = Source.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- import GraphDSL.Implicits._
- source1 ~> partial.in
- SourceShape(partial.out.map(_.toInt).outlet)
+ val source = Source.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ import GraphDSL.Implicits._
+ source1 ~> partial.in
+ SourceShape(partial.out.map(_.toInt).outlet)
})
source.to(Sink.fromSubscriber(probe)).run()
@@ -150,11 +144,10 @@ class GraphFlowSpec extends AkkaSpec {
val probe = TestSubscriber.manualProbe[Int]()
- val source = Source.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- import GraphDSL.Implicits._
- source1 ~> partial.in
- SourceShape(partial.out)
+ val source = Source.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ import GraphDSL.Implicits._
+ source1 ~> partial.in
+ SourceShape(partial.out)
})
source.map(_.toInt).to(Sink.fromSubscriber(probe)).run()
@@ -165,16 +158,14 @@ class GraphFlowSpec extends AkkaSpec {
"work with an GraphFlow" in {
val probe = TestSubscriber.manualProbe[Int]()
- val source = Source.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- import GraphDSL.Implicits._
- source1 ~> partial.in
- SourceShape(partial.out)
+ val source = Source.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ import GraphDSL.Implicits._
+ source1 ~> partial.in
+ SourceShape(partial.out)
})
- val flow = Flow.fromGraph(GraphDSL.create(Flow[String].map(_.toInt)) { implicit b ⇒
- importFlow ⇒
- FlowShape(importFlow.in, importFlow.out)
+ val flow = Flow.fromGraph(GraphDSL.create(Flow[String].map(_.toInt)) { implicit b ⇒ importFlow ⇒
+ FlowShape(importFlow.in, importFlow.out)
})
source.via(flow).to(Sink.fromSubscriber(probe)).run()
@@ -185,20 +176,18 @@ class GraphFlowSpec extends AkkaSpec {
"be reusable multiple times" in {
val probe = TestSubscriber.manualProbe[Int]()
- val source = Source.fromGraph(GraphDSL.create(Source(1 to 5)) { implicit b ⇒
- s ⇒
- import GraphDSL.Implicits._
- SourceShape(s.out.map(_ * 2).outlet)
+ val source = Source.fromGraph(GraphDSL.create(Source(1 to 5)) { implicit b ⇒ s ⇒
+ import GraphDSL.Implicits._
+ SourceShape(s.out.map(_ * 2).outlet)
})
- RunnableGraph.fromGraph(GraphDSL.create(source, source)(Keep.both) { implicit b ⇒
- (s1, s2) ⇒
- import GraphDSL.Implicits._
- val merge = b.add(Merge[Int](2))
- s1.out ~> merge.in(0)
- merge.out ~> Sink.fromSubscriber(probe)
- s2.out.map(_ * 10) ~> merge.in(1)
- ClosedShape
+ RunnableGraph.fromGraph(GraphDSL.create(source, source)(Keep.both) { implicit b ⇒ (s1, s2) ⇒
+ import GraphDSL.Implicits._
+ val merge = b.add(Merge[Int](2))
+ s1.out ~> merge.in(0)
+ merge.out ~> Sink.fromSubscriber(probe)
+ s2.out.map(_ * 10) ~> merge.in(1)
+ ClosedShape
}).run()
validateProbe(probe, 10, Set(2, 4, 6, 8, 10, 20, 40, 60, 80, 100))
@@ -209,11 +198,10 @@ class GraphFlowSpec extends AkkaSpec {
"work with a Source" in {
val probe = TestSubscriber.manualProbe[Int]()
- val sink = Sink.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- import GraphDSL.Implicits._
- partial.out.map(_.toInt) ~> Sink.fromSubscriber(probe)
- SinkShape(partial.in)
+ val sink = Sink.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ import GraphDSL.Implicits._
+ partial.out.map(_.toInt) ~> Sink.fromSubscriber(probe)
+ SinkShape(partial.in)
})
source1.to(sink).run()
@@ -225,8 +213,7 @@ class GraphFlowSpec extends AkkaSpec {
val probe = TestSubscriber.manualProbe[Int]()
val pubSink = Sink.asPublisher[Int](false)
- val sink = Sink.fromGraph(GraphDSL.create(pubSink) { implicit b ⇒
- p ⇒ SinkShape(p.in)
+ val sink = Sink.fromGraph(GraphDSL.create(pubSink) { implicit b ⇒ p ⇒ SinkShape(p.in)
})
val mm = source1.runWith(sink)
@@ -238,12 +225,11 @@ class GraphFlowSpec extends AkkaSpec {
"be transformable with a Pipe" in {
val probe = TestSubscriber.manualProbe[Int]()
- val sink = Sink.fromGraph(GraphDSL.create(partialGraph, Flow[String].map(_.toInt))(Keep.both) { implicit b ⇒
- (partial, flow) ⇒
- import GraphDSL.Implicits._
- flow.out ~> partial.in
- partial.out.map(_.toInt) ~> Sink.fromSubscriber(probe)
- SinkShape(flow.in)
+ val sink = Sink.fromGraph(GraphDSL.create(partialGraph, Flow[String].map(_.toInt))(Keep.both) { implicit b ⇒ (partial, flow) ⇒
+ import GraphDSL.Implicits._
+ flow.out ~> partial.in
+ partial.out.map(_.toInt) ~> Sink.fromSubscriber(probe)
+ SinkShape(flow.in)
})
val iSink = Flow[Int].map(_.toString).to(sink)
@@ -256,16 +242,14 @@ class GraphFlowSpec extends AkkaSpec {
val probe = TestSubscriber.manualProbe[Int]()
- val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- FlowShape(partial.in, partial.out)
+ val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ FlowShape(partial.in, partial.out)
})
- val sink = Sink.fromGraph(GraphDSL.create(Flow[String].map(_.toInt)) { implicit b ⇒
- flow ⇒
- import GraphDSL.Implicits._
- flow.out ~> Sink.fromSubscriber(probe)
- SinkShape(flow.in)
+ val sink = Sink.fromGraph(GraphDSL.create(Flow[String].map(_.toInt)) { implicit b ⇒ flow ⇒
+ import GraphDSL.Implicits._
+ flow.out ~> Sink.fromSubscriber(probe)
+ SinkShape(flow.in)
})
source1.via(flow).to(sink).run()
@@ -280,32 +264,28 @@ class GraphFlowSpec extends AkkaSpec {
val inSource = Source.asSubscriber[Int]
val outSink = Sink.asPublisher[Int](false)
- val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒
- partial ⇒
- import GraphDSL.Implicits._
- FlowShape(partial.in, partial.out.map(_.toInt).outlet)
+ val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b ⇒ partial ⇒
+ import GraphDSL.Implicits._
+ FlowShape(partial.in, partial.out.map(_.toInt).outlet)
})
- val source = Source.fromGraph(GraphDSL.create(Flow[Int].map(_.toString), inSource)(Keep.right) { implicit b ⇒
- (flow, src) ⇒
- import GraphDSL.Implicits._
- src.out ~> flow.in
- SourceShape(flow.out)
+ val source = Source.fromGraph(GraphDSL.create(Flow[Int].map(_.toString), inSource)(Keep.right) { implicit b ⇒ (flow, src) ⇒
+ import GraphDSL.Implicits._
+ src.out ~> flow.in
+ SourceShape(flow.out)
})
- val sink = Sink.fromGraph(GraphDSL.create(Flow[String].map(_.toInt), outSink)(Keep.right) { implicit b ⇒
- (flow, snk) ⇒
- import GraphDSL.Implicits._
- flow.out ~> snk.in
- SinkShape(flow.in)
+ val sink = Sink.fromGraph(GraphDSL.create(Flow[String].map(_.toInt), outSink)(Keep.right) { implicit b ⇒ (flow, snk) ⇒
+ import GraphDSL.Implicits._
+ flow.out ~> snk.in
+ SinkShape(flow.in)
})
- val (m1, m2, m3) = RunnableGraph.fromGraph(GraphDSL.create(source, flow, sink)(Tuple3.apply) { implicit b ⇒
- (src, f, snk) ⇒
- import GraphDSL.Implicits._
- src.out.map(_.toInt) ~> f.in
- f.out.map(_.toString) ~> snk.in
- ClosedShape
+ val (m1, m2, m3) = RunnableGraph.fromGraph(GraphDSL.create(source, flow, sink)(Tuple3.apply) { implicit b ⇒ (src, f, snk) ⇒
+ import GraphDSL.Implicits._
+ src.out.map(_.toInt) ~> f.in
+ f.out.map(_.toString) ~> snk.in
+ ClosedShape
}).run()
val subscriber = m1
@@ -321,21 +301,18 @@ class GraphFlowSpec extends AkkaSpec {
val inSource = Source.asSubscriber[Int]
val outSink = Sink.asPublisher[Int](false)
- val source = Source.fromGraph(GraphDSL.create(inSource) { implicit b ⇒
- src ⇒
- SourceShape(src.out)
+ val source = Source.fromGraph(GraphDSL.create(inSource) { implicit b ⇒ src ⇒
+ SourceShape(src.out)
})
- val sink = Sink.fromGraph(GraphDSL.create(outSink) { implicit b ⇒
- snk ⇒
- SinkShape(snk.in)
+ val sink = Sink.fromGraph(GraphDSL.create(outSink) { implicit b ⇒ snk ⇒
+ SinkShape(snk.in)
})
- val (m1, m2) = RunnableGraph.fromGraph(GraphDSL.create(source, sink)(Keep.both) { implicit b ⇒
- (src, snk) ⇒
- import GraphDSL.Implicits._
- src.out ~> snk.in
- ClosedShape
+ val (m1, m2) = RunnableGraph.fromGraph(GraphDSL.create(source, sink)(Keep.both) { implicit b ⇒ (src, snk) ⇒
+ import GraphDSL.Implicits._
+ src.out ~> snk.in
+ ClosedShape
}).run()
val subscriber = m1
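
The `(m1, m2, m3)` and `(m1, m2)` cases above show the materialized-value plumbing this spec exercises: `GraphDSL.create(g1, g2, ...)(combine)` hands the combiner one materialized value per imported graph, and `Keep.both`/`Tuple3.apply` are ready-made combiners. A compile-only sketch of the two-graph case (assuming the Akka 2.4-era NotUsed/Done types used elsewhere in this PR):

    import akka.{ Done, NotUsed }
    import akka.stream.scaladsl.{ Keep, RunnableGraph, Sink, Source }
    import scala.concurrent.Future

    val both: RunnableGraph[(NotUsed, Future[Done])] =
      Source(1 to 3).toMat(Sink.foreach[Int](println))(Keep.both)
    // Keep.both is the combiner (a, b) ⇒ (a, b); Keep.left / Keep.right pick a side.
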
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala
index 81f755995d..25737fdf65 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala
@@ -47,13 +47,12 @@ class GraphBalanceSpec extends AkkaSpec {
"support waiting for demand from all downstream subscriptions" in {
val s1 = TestSubscriber.manualProbe[Int]()
- val p2 = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false)) { implicit b ⇒
- p2Sink ⇒
- val balance = b.add(Balance[Int](2, waitForAllDownstreams = true))
- Source(List(1, 2, 3)) ~> balance.in
- balance.out(0) ~> Sink.fromSubscriber(s1)
- balance.out(1) ~> p2Sink
- ClosedShape
+ val p2 = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false)) { implicit b ⇒ p2Sink ⇒
+ val balance = b.add(Balance[Int](2, waitForAllDownstreams = true))
+ Source(List(1, 2, 3)) ~> balance.in
+ balance.out(0) ~> Sink.fromSubscriber(s1)
+ balance.out(1) ~> p2Sink
+ ClosedShape
}).run()
val sub1 = s1.expectSubscription()
@@ -78,14 +77,13 @@ class GraphBalanceSpec extends AkkaSpec {
"support waiting for demand from all non-cancelled downstream subscriptions" in assertAllStagesStopped {
val s1 = TestSubscriber.manualProbe[Int]()
- val (p2, p3) = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { implicit b ⇒
- (p2Sink, p3Sink) ⇒
- val balance = b.add(Balance[Int](3, waitForAllDownstreams = true))
- Source(List(1, 2, 3)) ~> balance.in
- balance.out(0) ~> Sink.fromSubscriber(s1)
- balance.out(1) ~> p2Sink
- balance.out(2) ~> p3Sink
- ClosedShape
+ val (p2, p3) = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { implicit b ⇒ (p2Sink, p3Sink) ⇒
+ val balance = b.add(Balance[Int](3, waitForAllDownstreams = true))
+ Source(List(1, 2, 3)) ~> balance.in
+ balance.out(0) ~> Sink.fromSubscriber(s1)
+ balance.out(1) ~> p2Sink
+ balance.out(2) ~> p3Sink
+ ClosedShape
}).run()
val sub1 = s1.expectSubscription()
@@ -125,17 +123,15 @@ class GraphBalanceSpec extends AkkaSpec {
"work with 5-way balance" in {
val sink = Sink.head[Seq[Int]]
- val (s1, s2, s3, s4, s5) = RunnableGraph.fromGraph(GraphDSL.create(sink, sink, sink, sink, sink)(Tuple5.apply) {
- implicit b ⇒
- (f1, f2, f3, f4, f5) ⇒
- val balance = b.add(Balance[Int](5, waitForAllDownstreams = true))
- Source(0 to 14) ~> balance.in
- balance.out(0).grouped(15) ~> f1
- balance.out(1).grouped(15) ~> f2
- balance.out(2).grouped(15) ~> f3
- balance.out(3).grouped(15) ~> f4
- balance.out(4).grouped(15) ~> f5
- ClosedShape
+ val (s1, s2, s3, s4, s5) = RunnableGraph.fromGraph(GraphDSL.create(sink, sink, sink, sink, sink)(Tuple5.apply) { implicit b ⇒ (f1, f2, f3, f4, f5) ⇒
+ val balance = b.add(Balance[Int](5, waitForAllDownstreams = true))
+ Source(0 to 14) ~> balance.in
+ balance.out(0).grouped(15) ~> f1
+ balance.out(1).grouped(15) ~> f2
+ balance.out(2).grouped(15) ~> f3
+ balance.out(3).grouped(15) ~> f4
+ balance.out(4).grouped(15) ~> f5
+ ClosedShape
}).run()
Set(s1, s2, s3, s4, s5) flatMap (Await.result(_, 3.seconds)) should be((0 to 14).toSet)
@@ -145,14 +141,13 @@ class GraphBalanceSpec extends AkkaSpec {
val numElementsForSink = 10000
val outputs = Sink.fold[Int, Int](0)(_ + _)
- val results = RunnableGraph.fromGraph(GraphDSL.create(outputs, outputs, outputs)(List(_, _, _)) { implicit b ⇒
- (o1, o2, o3) ⇒
- val balance = b.add(Balance[Int](3, waitForAllDownstreams = true))
- Source.repeat(1).take(numElementsForSink * 3) ~> balance.in
- balance.out(0) ~> o1
- balance.out(1) ~> o2
- balance.out(2) ~> o3
- ClosedShape
+ val results = RunnableGraph.fromGraph(GraphDSL.create(outputs, outputs, outputs)(List(_, _, _)) { implicit b ⇒ (o1, o2, o3) ⇒
+ val balance = b.add(Balance[Int](3, waitForAllDownstreams = true))
+ Source.repeat(1).take(numElementsForSink * 3) ~> balance.in
+ balance.out(0) ~> o1
+ balance.out(1) ~> o2
+ balance.out(2) ~> o3
+ ClosedShape
}).run()
import system.dispatcher
@@ -165,14 +160,13 @@ class GraphBalanceSpec extends AkkaSpec {
"fairly balance between three outputs" in {
val probe = TestSink.probe[Int]
- val (p1, p2, p3) = RunnableGraph.fromGraph(GraphDSL.create(probe, probe, probe)(Tuple3.apply) { implicit b ⇒
- (o1, o2, o3) ⇒
- val balance = b.add(Balance[Int](3))
- Source(1 to 7) ~> balance.in
- balance.out(0) ~> o1
- balance.out(1) ~> o2
- balance.out(2) ~> o3
- ClosedShape
+ val (p1, p2, p3) = RunnableGraph.fromGraph(GraphDSL.create(probe, probe, probe)(Tuple3.apply) { implicit b ⇒ (o1, o2, o3) ⇒
+ val balance = b.add(Balance[Int](3))
+ Source(1 to 7) ~> balance.in
+ balance.out(0) ~> o1
+ balance.out(1) ~> o2
+ balance.out(2) ~> o3
+ ClosedShape
}).run()
p1.requestNext(1)
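
The balance cases above all rely on the same two knobs: each element goes to exactly one output, chosen by available demand, and `waitForAllDownstreams = true` holds the first element until every output has signalled demand (the "support waiting" cases). A self-contained sketch (demo names hypothetical):

    import akka.actor.ActorSystem
    import akka.stream.{ ActorMaterializer, ClosedShape }
    import akka.stream.scaladsl.{ Balance, GraphDSL, RunnableGraph, Sink, Source }

    object BalanceDemo extends App {
      implicit val system = ActorSystem("balance-demo")
      implicit val mat = ActorMaterializer()

      RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
        import GraphDSL.Implicits._
        val balance = b.add(Balance[Int](2, waitForAllDownstreams = true))
        Source(1 to 4) ~> balance.in
        balance.out(0) ~> Sink.foreach[Int](i ⇒ println(s"left:  $i"))
        balance.out(1) ~> Sink.foreach[Int](i ⇒ println(s"right: $i"))
        ClosedShape
      }).run()
    }
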
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala
index 9997a01d68..6de5b1a1a8 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala
@@ -71,17 +71,16 @@ class GraphBroadcastSpec extends AkkaSpec {
headSink,
headSink,
headSink)(
- (fut1, fut2, fut3, fut4, fut5) ⇒ Future.sequence(List(fut1, fut2, fut3, fut4, fut5))) { implicit b ⇒
- (p1, p2, p3, p4, p5) ⇒
- val bcast = b.add(Broadcast[Int](5))
- Source(List(1, 2, 3)) ~> bcast.in
- bcast.out(0).grouped(5) ~> p1.in
- bcast.out(1).grouped(5) ~> p2.in
- bcast.out(2).grouped(5) ~> p3.in
- bcast.out(3).grouped(5) ~> p4.in
- bcast.out(4).grouped(5) ~> p5.in
- ClosedShape
- }).run()
+ (fut1, fut2, fut3, fut4, fut5) ⇒ Future.sequence(List(fut1, fut2, fut3, fut4, fut5))) { implicit b ⇒ (p1, p2, p3, p4, p5) ⇒
+ val bcast = b.add(Broadcast[Int](5))
+ Source(List(1, 2, 3)) ~> bcast.in
+ bcast.out(0).grouped(5) ~> p1.in
+ bcast.out(1).grouped(5) ~> p2.in
+ bcast.out(2).grouped(5) ~> p3.in
+ bcast.out(3).grouped(5) ~> p4.in
+ bcast.out(4).grouped(5) ~> p5.in
+ ClosedShape
+ }).run()
Await.result(result, 3.seconds) should be(List.fill(5)(List(1, 2, 3)))
}
@@ -101,35 +100,33 @@ class GraphBroadcastSpec extends AkkaSpec {
headSink, headSink, headSink, headSink, headSink,
headSink, headSink, headSink, headSink, headSink,
headSink, headSink, headSink, headSink, headSink,
- headSink, headSink)(combine) {
- implicit b ⇒
- (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, p22) ⇒
- val bcast = b.add(Broadcast[Int](22))
- Source(List(1, 2, 3)) ~> bcast.in
- bcast.out(0).grouped(5) ~> p1.in
- bcast.out(1).grouped(5) ~> p2.in
- bcast.out(2).grouped(5) ~> p3.in
- bcast.out(3).grouped(5) ~> p4.in
- bcast.out(4).grouped(5) ~> p5.in
- bcast.out(5).grouped(5) ~> p6.in
- bcast.out(6).grouped(5) ~> p7.in
- bcast.out(7).grouped(5) ~> p8.in
- bcast.out(8).grouped(5) ~> p9.in
- bcast.out(9).grouped(5) ~> p10.in
- bcast.out(10).grouped(5) ~> p11.in
- bcast.out(11).grouped(5) ~> p12.in
- bcast.out(12).grouped(5) ~> p13.in
- bcast.out(13).grouped(5) ~> p14.in
- bcast.out(14).grouped(5) ~> p15.in
- bcast.out(15).grouped(5) ~> p16.in
- bcast.out(16).grouped(5) ~> p17.in
- bcast.out(17).grouped(5) ~> p18.in
- bcast.out(18).grouped(5) ~> p19.in
- bcast.out(19).grouped(5) ~> p20.in
- bcast.out(20).grouped(5) ~> p21.in
- bcast.out(21).grouped(5) ~> p22.in
- ClosedShape
- }).run()
+ headSink, headSink)(combine) { implicit b ⇒ (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, p22) ⇒
+ val bcast = b.add(Broadcast[Int](22))
+ Source(List(1, 2, 3)) ~> bcast.in
+ bcast.out(0).grouped(5) ~> p1.in
+ bcast.out(1).grouped(5) ~> p2.in
+ bcast.out(2).grouped(5) ~> p3.in
+ bcast.out(3).grouped(5) ~> p4.in
+ bcast.out(4).grouped(5) ~> p5.in
+ bcast.out(5).grouped(5) ~> p6.in
+ bcast.out(6).grouped(5) ~> p7.in
+ bcast.out(7).grouped(5) ~> p8.in
+ bcast.out(8).grouped(5) ~> p9.in
+ bcast.out(9).grouped(5) ~> p10.in
+ bcast.out(10).grouped(5) ~> p11.in
+ bcast.out(11).grouped(5) ~> p12.in
+ bcast.out(12).grouped(5) ~> p13.in
+ bcast.out(13).grouped(5) ~> p14.in
+ bcast.out(14).grouped(5) ~> p15.in
+ bcast.out(15).grouped(5) ~> p16.in
+ bcast.out(16).grouped(5) ~> p17.in
+ bcast.out(17).grouped(5) ~> p18.in
+ bcast.out(18).grouped(5) ~> p19.in
+ bcast.out(19).grouped(5) ~> p20.in
+ bcast.out(20).grouped(5) ~> p21.in
+ bcast.out(21).grouped(5) ~> p22.in
+ ClosedShape
+ }).run()
Await.result(result, 3.seconds) should be(List.fill(22)(List(1, 2, 3)))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala
index 496d4cbbcc..aed95477e2 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala
@@ -200,7 +200,7 @@ class GraphDSLCompileSpec extends AkkaSpec {
val unzip = b.add(Unzip[Int, String]())
val out = Sink.asPublisher[(Int, String)](false)
import GraphDSL.Implicits._
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out0 ~> Flow[Int].map(_ * 2) ~> zip.in0
unzip.out1 ~> zip.in1
zip.out ~> out
@@ -298,7 +298,7 @@ class GraphDSLCompileSpec extends AkkaSpec {
b.add(Source.fromIterator(apples)) ~> Flow[Apple] ~> b.add(Sink.asPublisher[Fruit](false))
appleSource ~> Flow[Apple] ~> merge.in(10)
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out1 ~> whatever
unzip.out0 ~> b.add(Sink.asPublisher[Any](false))
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala
index 2452adbce9..631dcc5a54 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala
@@ -30,11 +30,10 @@ class GraphMatValueSpec extends AkkaSpec {
"expose the materialized value as source" in {
val sub = TestSubscriber.manualProbe[Int]()
- val f = RunnableGraph.fromGraph(GraphDSL.create(foldSink) { implicit b ⇒
- fold ⇒
- Source(1 to 10) ~> fold
- b.materializedValue.mapAsync(4)(identity) ~> Sink.fromSubscriber(sub)
- ClosedShape
+ val f = RunnableGraph.fromGraph(GraphDSL.create(foldSink) { implicit b ⇒ fold ⇒
+ Source(1 to 10) ~> fold
+ b.materializedValue.mapAsync(4)(identity) ~> Sink.fromSubscriber(sub)
+ ClosedShape
}).run()
val r1 = Await.result(f, 3.seconds)
@@ -47,15 +46,14 @@ class GraphMatValueSpec extends AkkaSpec {
"expose the materialized value as source multiple times" in {
val sub = TestSubscriber.manualProbe[Int]()
- val f = RunnableGraph.fromGraph(GraphDSL.create(foldSink) { implicit b ⇒
- fold ⇒
- val zip = b.add(ZipWith[Int, Int, Int](_ + _))
- Source(1 to 10) ~> fold
- b.materializedValue.mapAsync(4)(identity) ~> zip.in0
- b.materializedValue.mapAsync(4)(identity) ~> zip.in1
+ val f = RunnableGraph.fromGraph(GraphDSL.create(foldSink) { implicit b ⇒ fold ⇒
+ val zip = b.add(ZipWith[Int, Int, Int](_ + _))
+ Source(1 to 10) ~> fold
+ b.materializedValue.mapAsync(4)(identity) ~> zip.in0
+ b.materializedValue.mapAsync(4)(identity) ~> zip.in1
- zip.out ~> Sink.fromSubscriber(sub)
- ClosedShape
+ zip.out ~> Sink.fromSubscriber(sub)
+ ClosedShape
}).run()
val r1 = Await.result(f, 3.seconds)
@@ -66,10 +64,9 @@ class GraphMatValueSpec extends AkkaSpec {
}
// Exposes the materialized value as a stream value
- val foldFeedbackSource: Source[Future[Int], Future[Int]] = Source.fromGraph(GraphDSL.create(foldSink) { implicit b ⇒
- fold ⇒
- Source(1 to 10) ~> fold
- SourceShape(b.materializedValue)
+ val foldFeedbackSource: Source[Future[Int], Future[Int]] = Source.fromGraph(GraphDSL.create(foldSink) { implicit b ⇒ fold ⇒
+ Source(1 to 10) ~> fold
+ SourceShape(b.materializedValue)
})
"allow exposing the materialized value as port" in {
@@ -84,21 +81,19 @@ class GraphMatValueSpec extends AkkaSpec {
}
"work properly with nesting and reusing" in {
- val compositeSource1 = Source.fromGraph(GraphDSL.create(foldFeedbackSource, foldFeedbackSource)(Keep.both) { implicit b ⇒
- (s1, s2) ⇒
- val zip = b.add(ZipWith[Int, Int, Int](_ + _))
+ val compositeSource1 = Source.fromGraph(GraphDSL.create(foldFeedbackSource, foldFeedbackSource)(Keep.both) { implicit b ⇒ (s1, s2) ⇒
+ val zip = b.add(ZipWith[Int, Int, Int](_ + _))
- s1.out.mapAsync(4)(identity) ~> zip.in0
- s2.out.mapAsync(4)(identity).map(_ * 100) ~> zip.in1
- SourceShape(zip.out)
+ s1.out.mapAsync(4)(identity) ~> zip.in0
+ s2.out.mapAsync(4)(identity).map(_ * 100) ~> zip.in1
+ SourceShape(zip.out)
})
- val compositeSource2 = Source.fromGraph(GraphDSL.create(compositeSource1, compositeSource1)(Keep.both) { implicit b ⇒
- (s1, s2) ⇒
- val zip = b.add(ZipWith[Int, Int, Int](_ + _))
- s1.out ~> zip.in0
- s2.out.map(_ * 10000) ~> zip.in1
- SourceShape(zip.out)
+ val compositeSource2 = Source.fromGraph(GraphDSL.create(compositeSource1, compositeSource1)(Keep.both) { implicit b ⇒ (s1, s2) ⇒
+ val zip = b.add(ZipWith[Int, Int, Int](_ + _))
+ s1.out ~> zip.in0
+ s2.out.map(_ * 10000) ~> zip.in1
+ SourceShape(zip.out)
})
val (((f1, f2), (f3, f4)), result) = compositeSource2.toMat(Sink.head)(Keep.both).run()
@@ -112,22 +107,18 @@ class GraphMatValueSpec extends AkkaSpec {
}
"work also when the source’s module is copied" in {
- val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(foldSink) {
- implicit builder ⇒
- fold ⇒
- FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet)
+ val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(foldSink) { implicit builder ⇒ fold ⇒
+ FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet)
})
Await.result(Source(1 to 10).via(foldFlow).runWith(Sink.head), 3.seconds) should ===(55)
}
"work also when the source’s module is copied and the graph is extended before using the matValSrc" in {
- val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(foldSink) {
- implicit builder ⇒
- fold ⇒
- val map = builder.add(Flow[Future[Int]].mapAsync(4)(identity))
- builder.materializedValue ~> map
- FlowShape(fold.in, map.outlet)
+ val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(foldSink) { implicit builder ⇒ fold ⇒
+ val map = builder.add(Flow[Future[Int]].mapAsync(4)(identity))
+ builder.materializedValue ~> map
+ FlowShape(fold.in, map.outlet)
})
Await.result(Source(1 to 10).via(foldFlow).runWith(Sink.head), 3.seconds) should ===(55)
@@ -140,11 +131,10 @@ class GraphMatValueSpec extends AkkaSpec {
Source.empty.mapMaterializedValue(_ ⇒ done = true) ~> Sink.ignore
ClosedShape
}
- val r = RunnableGraph.fromGraph(GraphDSL.create(Sink.ignore) { implicit b ⇒
- (s) ⇒
- b.add(g)
- Source(1 to 10) ~> s
- ClosedShape
+ val r = RunnableGraph.fromGraph(GraphDSL.create(Sink.ignore) { implicit b ⇒ (s) ⇒
+ b.add(g)
+ Source(1 to 10) ~> s
+ ClosedShape
})
r.run().futureValue should ===(akka.Done)
done should ===(true)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala
index 12527bdf8f..4ca4898f7c 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala
@@ -32,32 +32,30 @@ class GraphMergePreferredSpec extends TwoStreamsSetup {
val preferred = Source(Stream.fill(numElements)(1))
val aux = Source(Stream.fill(numElements)(2))
- val result = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒
- sink ⇒
- val merge = b.add(MergePreferred[Int](3))
- preferred ~> merge.preferred
+ val result = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒ sink ⇒
+ val merge = b.add(MergePreferred[Int](3))
+ preferred ~> merge.preferred
- merge.out.grouped(numElements * 2) ~> sink.in
- aux ~> merge.in(0)
- aux ~> merge.in(1)
- aux ~> merge.in(2)
- ClosedShape
+ merge.out.grouped(numElements * 2) ~> sink.in
+ aux ~> merge.in(0)
+ aux ~> merge.in(1)
+ aux ~> merge.in(2)
+ ClosedShape
}).run()
Await.result(result, 3.seconds).filter(_ == 1).size should be(numElements)
}
"eventually pass through all elements" in {
- val result = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒
- sink ⇒
- val merge = b.add(MergePreferred[Int](3))
- Source(1 to 100) ~> merge.preferred
+ val result = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒ sink ⇒
+ val merge = b.add(MergePreferred[Int](3))
+ Source(1 to 100) ~> merge.preferred
- merge.out.grouped(500) ~> sink.in
- Source(101 to 200) ~> merge.in(0)
- Source(201 to 300) ~> merge.in(1)
- Source(301 to 400) ~> merge.in(2)
- ClosedShape
+ merge.out.grouped(500) ~> sink.in
+ Source(101 to 200) ~> merge.in(0)
+ Source(201 to 300) ~> merge.in(1)
+ Source(301 to 400) ~> merge.in(2)
+ ClosedShape
}).run()
Await.result(result, 3.seconds).toSet should ===((1 to 400).toSet)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala
index ef11c89324..7308c7b849 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala
@@ -167,13 +167,12 @@ class GraphMergeSpec extends TwoStreamsSetup {
val src1 = Source.asSubscriber[Int]
val src2 = Source.asSubscriber[Int]
- val (graphSubscriber1, graphSubscriber2) = RunnableGraph.fromGraph(GraphDSL.create(src1, src2)((_, _)) { implicit b ⇒
- (s1, s2) ⇒
- val merge = b.add(Merge[Int](2))
- s1.out ~> merge.in(0)
- s2.out ~> merge.in(1)
- merge.out ~> Sink.fromSubscriber(down)
- ClosedShape
+ val (graphSubscriber1, graphSubscriber2) = RunnableGraph.fromGraph(GraphDSL.create(src1, src2)((_, _)) { implicit b ⇒ (s1, s2) ⇒
+ val merge = b.add(Merge[Int](2))
+ s1.out ~> merge.in(0)
+ s2.out ~> merge.in(1)
+ merge.out ~> Sink.fromSubscriber(down)
+ ClosedShape
}).run()
val downstream = down.expectSubscription()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala
index d5cc067fa8..cad98ad609 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala
@@ -55,16 +55,15 @@ class GraphOpsIntegrationSpec extends AkkaSpec {
"GraphDSLs" must {
"support broadcast - merge layouts" in {
- val resultFuture = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒
- (sink) ⇒
- val bcast = b.add(Broadcast[Int](2))
- val merge = b.add(Merge[Int](2))
+ val resultFuture = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒ (sink) ⇒
+ val bcast = b.add(Broadcast[Int](2))
+ val merge = b.add(Merge[Int](2))
- Source(List(1, 2, 3)) ~> bcast.in
- bcast.out(0) ~> merge.in(0)
- bcast.out(1).map(_ + 3) ~> merge.in(1)
- merge.out.grouped(10) ~> sink.in
- ClosedShape
+ Source(List(1, 2, 3)) ~> bcast.in
+ bcast.out(0) ~> merge.in(0)
+ bcast.out(1).map(_ + 3) ~> merge.in(1)
+ merge.out.grouped(10) ~> sink.in
+ ClosedShape
}).run()
Await.result(resultFuture, 3.seconds).sorted should be(List(1, 2, 3, 4, 5, 6))
@@ -72,17 +71,16 @@ class GraphOpsIntegrationSpec extends AkkaSpec {
"support balance - merge (parallelization) layouts" in {
val elements = 0 to 10
- val out = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒
- (sink) ⇒
- val balance = b.add(Balance[Int](5))
- val merge = b.add(Merge[Int](5))
+ val out = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒ (sink) ⇒
+ val balance = b.add(Balance[Int](5))
+ val merge = b.add(Merge[Int](5))
- Source(elements) ~> balance.in
+ Source(elements) ~> balance.in
- for (i ← 0 until 5) balance.out(i) ~> merge.in(i)
+ for (i ← 0 until 5) balance.out(i) ~> merge.in(i)
- merge.out.grouped(elements.size * 2) ~> sink.in
- ClosedShape
+ merge.out.grouped(elements.size * 2) ~> sink.in
+ ClosedShape
}).run()
Await.result(out, 3.seconds).sorted should be(elements)
@@ -92,43 +90,42 @@ class GraphOpsIntegrationSpec extends AkkaSpec {
// see https://en.wikipedia.org/wiki/Topological_sorting#mediaviewer/File:Directed_acyclic_graph.png
val seqSink = Sink.head[Seq[Int]]
- val (resultFuture2, resultFuture9, resultFuture10) = RunnableGraph.fromGraph(GraphDSL.create(seqSink, seqSink, seqSink)(Tuple3.apply) { implicit b ⇒
- (sink2, sink9, sink10) ⇒
- val b3 = b.add(Broadcast[Int](2))
- val b7 = b.add(Broadcast[Int](2))
- val b11 = b.add(Broadcast[Int](3))
- val m8 = b.add(Merge[Int](2))
- val m9 = b.add(Merge[Int](2))
- val m10 = b.add(Merge[Int](2))
- val m11 = b.add(Merge[Int](2))
- val in3 = Source(List(3))
- val in5 = Source(List(5))
- val in7 = Source(List(7))
+ val (resultFuture2, resultFuture9, resultFuture10) = RunnableGraph.fromGraph(GraphDSL.create(seqSink, seqSink, seqSink)(Tuple3.apply) { implicit b ⇒ (sink2, sink9, sink10) ⇒
+ val b3 = b.add(Broadcast[Int](2))
+ val b7 = b.add(Broadcast[Int](2))
+ val b11 = b.add(Broadcast[Int](3))
+ val m8 = b.add(Merge[Int](2))
+ val m9 = b.add(Merge[Int](2))
+ val m10 = b.add(Merge[Int](2))
+ val m11 = b.add(Merge[Int](2))
+ val in3 = Source(List(3))
+ val in5 = Source(List(5))
+ val in7 = Source(List(7))
- // First layer
- in7 ~> b7.in
- b7.out(0) ~> m11.in(0)
- b7.out(1) ~> m8.in(0)
+ // First layer
+ in7 ~> b7.in
+ b7.out(0) ~> m11.in(0)
+ b7.out(1) ~> m8.in(0)
- in5 ~> m11.in(1)
+ in5 ~> m11.in(1)
- in3 ~> b3.in
- b3.out(0) ~> m8.in(1)
- b3.out(1) ~> m10.in(0)
+ in3 ~> b3.in
+ b3.out(0) ~> m8.in(1)
+ b3.out(1) ~> m10.in(0)
- // Second layer
- m11.out ~> b11.in
- b11.out(0).grouped(1000) ~> sink2.in // Vertex 2 is omitted since it has only one in and out
- b11.out(1) ~> m9.in(0)
- b11.out(2) ~> m10.in(1)
+ // Second layer
+ m11.out ~> b11.in
+ b11.out(0).grouped(1000) ~> sink2.in // Vertex 2 is omitted since it has only one in and out
+ b11.out(1) ~> m9.in(0)
+ b11.out(2) ~> m10.in(1)
- m8.out ~> m9.in(1)
+ m8.out ~> m9.in(1)
- // Third layer
- m9.out.grouped(1000) ~> sink9.in
- m10.out.grouped(1000) ~> sink10.in
+ // Third layer
+ m9.out.grouped(1000) ~> sink9.in
+ m10.out.grouped(1000) ~> sink10.in
- ClosedShape
+ ClosedShape
}).run()
Await.result(resultFuture2, 3.seconds).sorted should be(List(5, 7))
@@ -139,16 +136,15 @@ class GraphOpsIntegrationSpec extends AkkaSpec {
"allow adding of flows to sources and sinks to flows" in {
- val resultFuture = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒
- (sink) ⇒
- val bcast = b.add(Broadcast[Int](2))
- val merge = b.add(Merge[Int](2))
+ val resultFuture = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒ (sink) ⇒
+ val bcast = b.add(Broadcast[Int](2))
+ val merge = b.add(Merge[Int](2))
- Source(List(1, 2, 3)).map(_ * 2) ~> bcast.in
- bcast.out(0) ~> merge.in(0)
- bcast.out(1).map(_ + 3) ~> merge.in(1)
- merge.out.grouped(10) ~> sink.in
- ClosedShape
+ Source(List(1, 2, 3)).map(_ * 2) ~> bcast.in
+ bcast.out(0) ~> merge.in(0)
+ bcast.out(1).map(_ + 3) ~> merge.in(1)
+ merge.out.grouped(10) ~> sink.in
+ ClosedShape
}).run()
Await.result(resultFuture, 3.seconds) should contain theSameElementsAs (Seq(2, 4, 6, 5, 7, 9))
@@ -173,24 +169,23 @@ class GraphOpsIntegrationSpec extends AkkaSpec {
"be possible to use as lego bricks" in {
val shuffler = Shuffle(Flow[Int].map(_ + 1))
- val f: Future[Seq[Int]] = RunnableGraph.fromGraph(GraphDSL.create(shuffler, shuffler, shuffler, Sink.head[Seq[Int]])((_, _, _, fut) ⇒ fut) { implicit b ⇒
- (s1, s2, s3, sink) ⇒
- val merge = b.add(Merge[Int](2))
+ val f: Future[Seq[Int]] = RunnableGraph.fromGraph(GraphDSL.create(shuffler, shuffler, shuffler, Sink.head[Seq[Int]])((_, _, _, fut) ⇒ fut) { implicit b ⇒ (s1, s2, s3, sink) ⇒
+ val merge = b.add(Merge[Int](2))
- Source(List(1, 2, 3)) ~> s1.in1
- Source(List(10, 11, 12)) ~> s1.in2
+ Source(List(1, 2, 3)) ~> s1.in1
+ Source(List(10, 11, 12)) ~> s1.in2
- s1.out1 ~> s2.in1
- s1.out2 ~> s2.in2
+ s1.out1 ~> s2.in1
+ s1.out2 ~> s2.in2
- s2.out1 ~> s3.in1
- s2.out2 ~> s3.in2
+ s2.out1 ~> s3.in1
+ s2.out2 ~> s3.in2
- s3.out1 ~> merge.in(0)
- s3.out2 ~> merge.in(1)
+ s3.out1 ~> merge.in(0)
+ s3.out2 ~> merge.in(1)
- merge.out.grouped(1000) ~> sink
- ClosedShape
+ merge.out.grouped(1000) ~> sink
+ ClosedShape
}).run()
val result = Await.result(f, 3.seconds)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala
index 4e82b11829..cb4ffdf612 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala
@@ -26,35 +26,32 @@ class GraphPartialSpec extends AkkaSpec {
FlowShape(bcast.in, zip.out)
}
- val (_, _, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b ⇒
- (d1, d2, sink) ⇒
- Source(List(1, 2, 3)) ~> d1.in
- d1.out ~> d2.in
- d2.out.grouped(100) ~> sink.in
- ClosedShape
+ val (_, _, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b ⇒ (d1, d2, sink) ⇒
+ Source(List(1, 2, 3)) ~> d1.in
+ d1.out ~> d2.in
+ d2.out.grouped(100) ~> sink.in
+ ClosedShape
}).run()
Await.result(result, 3.seconds) should be(List(4, 8, 12))
}
"be able to build and reuse simple materializing partial graphs" in {
- val doubler = GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒
- sink ⇒
- val bcast = b.add(Broadcast[Int](3))
- val zip = b.add(ZipWith((a: Int, b: Int) ⇒ a + b))
+ val doubler = GraphDSL.create(Sink.head[Seq[Int]]) { implicit b ⇒ sink ⇒
+ val bcast = b.add(Broadcast[Int](3))
+ val zip = b.add(ZipWith((a: Int, b: Int) ⇒ a + b))
- bcast.out(0) ~> zip.in0
- bcast.out(1) ~> zip.in1
- bcast.out(2).grouped(100) ~> sink.in
- FlowShape(bcast.in, zip.out)
+ bcast.out(0) ~> zip.in0
+ bcast.out(1) ~> zip.in1
+ bcast.out(2).grouped(100) ~> sink.in
+ FlowShape(bcast.in, zip.out)
}
- val (sub1, sub2, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b ⇒
- (d1, d2, sink) ⇒
- Source(List(1, 2, 3)) ~> d1.in
- d1.out ~> d2.in
- d2.out.grouped(100) ~> sink.in
- ClosedShape
+ val (sub1, sub2, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b ⇒ (d1, d2, sink) ⇒
+ Source(List(1, 2, 3)) ~> d1.in
+ d1.out ~> d2.in
+ d2.out.grouped(100) ~> sink.in
+ ClosedShape
}).run()
Await.result(result, 3.seconds) should be(List(4, 8, 12))
@@ -65,28 +62,26 @@ class GraphPartialSpec extends AkkaSpec {
"be able to build and reuse complex materializing partial graphs" in {
val summer = Sink.fold[Int, Int](0)(_ + _)
- val doubler = GraphDSL.create(summer, summer)(Tuple2.apply) { implicit b ⇒
- (s1, s2) ⇒
- val bcast = b.add(Broadcast[Int](3))
- val bcast2 = b.add(Broadcast[Int](2))
- val zip = b.add(ZipWith((a: Int, b: Int) ⇒ a + b))
+ val doubler = GraphDSL.create(summer, summer)(Tuple2.apply) { implicit b ⇒ (s1, s2) ⇒
+ val bcast = b.add(Broadcast[Int](3))
+ val bcast2 = b.add(Broadcast[Int](2))
+ val zip = b.add(ZipWith((a: Int, b: Int) ⇒ a + b))
- bcast.out(0) ~> zip.in0
- bcast.out(1) ~> zip.in1
- bcast.out(2) ~> s1.in
+ bcast.out(0) ~> zip.in0
+ bcast.out(1) ~> zip.in1
+ bcast.out(2) ~> s1.in
- zip.out ~> bcast2.in
- bcast2.out(0) ~> s2.in
+ zip.out ~> bcast2.in
+ bcast2.out(0) ~> s2.in
- FlowShape(bcast.in, bcast2.out(1))
+ FlowShape(bcast.in, bcast2.out(1))
}
- val (sub1, sub2, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b ⇒
- (d1, d2, sink) ⇒
- Source(List(1, 2, 3)) ~> d1.in
- d1.out ~> d2.in
- d2.out.grouped(100) ~> sink.in
- ClosedShape
+ val (sub1, sub2, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b ⇒ (d1, d2, sink) ⇒
+ Source(List(1, 2, 3)) ~> d1.in
+ d1.out ~> d2.in
+ d2.out.grouped(100) ~> sink.in
+ ClosedShape
}).run()
Await.result(result, 3.seconds) should be(List(4, 8, 12))
@@ -97,17 +92,15 @@ class GraphPartialSpec extends AkkaSpec {
}
"be able to expose the ports of imported graphs" in {
- val p = GraphDSL.create(Flow[Int].map(_ + 1)) { implicit b ⇒
- flow ⇒
- FlowShape(flow.in, flow.out)
+ val p = GraphDSL.create(Flow[Int].map(_ + 1)) { implicit b ⇒ flow ⇒
+ FlowShape(flow.in, flow.out)
}
- val fut = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Int], p)(Keep.left) { implicit b ⇒
- (sink, flow) ⇒
- import GraphDSL.Implicits._
- Source.single(0) ~> flow.in
- flow.out ~> sink.in
- ClosedShape
+ val fut = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Int], p)(Keep.left) { implicit b ⇒ (sink, flow) ⇒
+ import GraphDSL.Implicits._
+ Source.single(0) ~> flow.in
+ flow.out ~> sink.in
+ ClosedShape
}).run()
Await.result(fut, 3.seconds) should be(1)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala
index a8e97b28ac..a30c3a31f3 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala
@@ -23,18 +23,17 @@ class GraphPartitionSpec extends AkkaSpec {
"partition to three subscribers" in assertAllStagesStopped {
- val (s1, s2, s3) = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b ⇒
- (sink1, sink2, sink3) ⇒
- val partition = b.add(Partition[Int](3, {
- case g if (g > 3) ⇒ 0
- case l if (l < 3) ⇒ 1
- case e if (e == 3) ⇒ 2
- }))
- Source(List(1, 2, 3, 4, 5)) ~> partition.in
- partition.out(0) ~> sink1.in
- partition.out(1) ~> sink2.in
- partition.out(2) ~> sink3.in
- ClosedShape
+ val (s1, s2, s3) = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b ⇒ (sink1, sink2, sink3) ⇒
+ val partition = b.add(Partition[Int](3, {
+ case g if (g > 3) ⇒ 0
+ case l if (l < 3) ⇒ 1
+ case e if (e == 3) ⇒ 2
+ }))
+ Source(List(1, 2, 3, 4, 5)) ~> partition.in
+ partition.out(0) ~> sink1.in
+ partition.out(1) ~> sink2.in
+ partition.out(2) ~> sink3.in
+ ClosedShape
}).run()
s1.futureValue.toSet should ===(Set(4, 5))
@@ -124,16 +123,15 @@ class GraphPartitionSpec extends AkkaSpec {
val s = Sink.seq[Int]
val input = Set(5, 2, 9, 1, 1, 1, 10)
- val g = RunnableGraph.fromGraph(GraphDSL.create(s) { implicit b ⇒
- sink ⇒
- val partition = b.add(Partition[Int](2, { case l if l < 4 ⇒ 0; case _ ⇒ 1 }))
- val merge = b.add(Merge[Int](2))
- Source(input) ~> partition.in
- partition.out(0) ~> merge.in(0)
- partition.out(1) ~> merge.in(1)
- merge.out ~> sink.in
+ val g = RunnableGraph.fromGraph(GraphDSL.create(s) { implicit b ⇒ sink ⇒
+ val partition = b.add(Partition[Int](2, { case l if l < 4 ⇒ 0; case _ ⇒ 1 }))
+ val merge = b.add(Merge[Int](2))
+ Source(input) ~> partition.in
+ partition.out(0) ~> merge.in(0)
+ partition.out(1) ~> merge.in(1)
+ merge.out ~> sink.in
- ClosedShape
+ ClosedShape
})
val result = Await.result(g.run(), remainingOrDefault)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala
index 9dbb847352..d0f9be0c78 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala
@@ -25,7 +25,7 @@ class GraphUnzipSpec extends AkkaSpec {
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
val unzip = b.add(Unzip[Int, String]())
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out1 ~> Flow[String].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2)
unzip.out0 ~> Flow[Int].buffer(16, OverflowStrategy.backpressure).map(_ * 2) ~> Sink.fromSubscriber(c1)
ClosedShape
@@ -55,7 +55,7 @@ class GraphUnzipSpec extends AkkaSpec {
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
val unzip = b.add(Unzip[Int, String]())
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out0 ~> Sink.fromSubscriber(c1)
unzip.out1 ~> Sink.fromSubscriber(c2)
ClosedShape
@@ -77,7 +77,7 @@ class GraphUnzipSpec extends AkkaSpec {
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
val unzip = b.add(Unzip[Int, String]())
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out0 ~> Sink.fromSubscriber(c1)
unzip.out1 ~> Sink.fromSubscriber(c2)
ClosedShape
@@ -99,7 +99,7 @@ class GraphUnzipSpec extends AkkaSpec {
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
val unzip = b.add(Unzip[Int, String]())
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out0 ~> Sink.fromSubscriber(c1)
unzip.out1 ~> Sink.fromSubscriber(c2)
ClosedShape
@@ -122,7 +122,7 @@ class GraphUnzipSpec extends AkkaSpec {
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
val unzip = b.add(Unzip[Int, String]())
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out0 ~> Sink.fromSubscriber(c1)
unzip.out1 ~> Sink.fromSubscriber(c2)
ClosedShape
@@ -158,10 +158,10 @@ class GraphUnzipSpec extends AkkaSpec {
sub1.request(3)
sub2.request(3)
p1.expectRequest(p1Sub, 16)
- p1Sub.sendNext(1 -> "a")
+ p1Sub.sendNext(1 → "a")
c1.expectNext(1)
c2.expectNext("a")
- p1Sub.sendNext(2 -> "b")
+ p1Sub.sendNext(2 → "b")
c1.expectNext(2)
c2.expectNext("b")
sub1.cancel()
@@ -174,7 +174,7 @@ class GraphUnzipSpec extends AkkaSpec {
RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
val zip = b.add(Zip[Int, String]())
val unzip = b.add(Unzip[Int, String]())
- Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in
+ Source(List(1 → "a", 2 → "b", 3 → "c")) ~> unzip.in
unzip.out0 ~> zip.in0
unzip.out1 ~> zip.in1
zip.out ~> Sink.fromSubscriber(c1)
@@ -183,9 +183,9 @@ class GraphUnzipSpec extends AkkaSpec {
val sub1 = c1.expectSubscription()
sub1.request(5)
- c1.expectNext(1 -> "a")
- c1.expectNext(2 -> "b")
- c1.expectNext(3 -> "c")
+ c1.expectNext(1 → "a")
+ c1.expectNext(2 → "b")
+ c1.expectNext(3 → "c")
c1.expectComplete()
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala
index e10399f3df..95c51c4e2a 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala
@@ -59,15 +59,14 @@ class GraphZipNSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[Int]()
val downstream = TestSubscriber.probe[immutable.Seq[Int]]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zipN = b.add(ZipN[Int](2))
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zipN = b.add(ZipN[Int](2))
- Source.fromPublisher(upstream1) ~> zipN.in(0)
- Source.fromPublisher(upstream2) ~> zipN.in(1)
- zipN.out ~> out
+ Source.fromPublisher(upstream1) ~> zipN.in(0)
+ Source.fromPublisher(upstream2) ~> zipN.in(1)
+ zipN.out ~> out
- ClosedShape
+ ClosedShape
}).run()
upstream1.sendNext(1)
@@ -85,15 +84,14 @@ class GraphZipNSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[Int]()
val downstream = TestSubscriber.probe[immutable.Seq[Int]]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zipN = b.add(ZipN[Int](2))
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zipN = b.add(ZipN[Int](2))
- Source.fromPublisher(upstream1) ~> zipN.in(0)
- Source.fromPublisher(upstream2) ~> zipN.in(1)
- zipN.out ~> out
+ Source.fromPublisher(upstream1) ~> zipN.in(0)
+ Source.fromPublisher(upstream2) ~> zipN.in(1)
+ zipN.out ~> out
- ClosedShape
+ ClosedShape
}).run()
downstream.request(1)
@@ -112,15 +110,14 @@ class GraphZipNSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[Int]()
val downstream = TestSubscriber.probe[immutable.Seq[Int]]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zipN = b.add(ZipN[Int](2))
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zipN = b.add(ZipN[Int](2))
- Source.fromPublisher(upstream1) ~> zipN.in(0)
- Source.fromPublisher(upstream2) ~> zipN.in(1)
- zipN.out ~> out
+ Source.fromPublisher(upstream1) ~> zipN.in(0)
+ Source.fromPublisher(upstream2) ~> zipN.in(1)
+ zipN.out ~> out
- ClosedShape
+ ClosedShape
}).run()
upstream1.sendNext(1)
@@ -138,15 +135,14 @@ class GraphZipNSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[Int]()
val downstream = TestSubscriber.probe[immutable.Seq[Int]]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zipN = b.add(ZipN[Int](2))
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zipN = b.add(ZipN[Int](2))
- Source.fromPublisher(upstream1) ~> zipN.in(0)
- Source.fromPublisher(upstream2) ~> zipN.in(1)
- zipN.out ~> out
+ Source.fromPublisher(upstream1) ~> zipN.in(0)
+ Source.fromPublisher(upstream2) ~> zipN.in(1)
+ zipN.out ~> out
- ClosedShape
+ ClosedShape
}).run()
upstream1.sendNext(1)
@@ -165,15 +161,14 @@ class GraphZipNSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[Int]()
val downstream = TestSubscriber.probe[immutable.Seq[Int]]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zipN = b.add(ZipN[Int](2))
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zipN = b.add(ZipN[Int](2))
- Source.fromPublisher(upstream1) ~> zipN.in(0)
- Source.fromPublisher(upstream2) ~> zipN.in(1)
- zipN.out ~> out
+ Source.fromPublisher(upstream1) ~> zipN.in(0)
+ Source.fromPublisher(upstream2) ~> zipN.in(1)
+ zipN.out ~> out
- ClosedShape
+ ClosedShape
}).run()
downstream.ensureSubscription()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala
index 83762d8b9a..c6e6576eda 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala
@@ -57,15 +57,14 @@ class GraphZipSpec extends TwoStreamsSetup {
val upstream1 = TestPublisher.probe[Int]()
val upstream2 = TestPublisher.probe[String]()
- val completed = RunnableGraph.fromGraph(GraphDSL.create(Sink.ignore) { implicit b ⇒
- out ⇒
- val zip = b.add(Zip[Int, String]())
+ val completed = RunnableGraph.fromGraph(GraphDSL.create(Sink.ignore) { implicit b ⇒ out ⇒
+ val zip = b.add(Zip[Int, String]())
- Source.fromPublisher(upstream1) ~> zip.in0
- Source.fromPublisher(upstream2) ~> zip.in1
- zip.out ~> out
+ Source.fromPublisher(upstream1) ~> zip.in0
+ Source.fromPublisher(upstream2) ~> zip.in1
+ zip.out ~> out
- ClosedShape
+ ClosedShape
}).run()
upstream1.sendNext(1)
@@ -83,15 +82,14 @@ class GraphZipSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[String]()
val downstream = TestSubscriber.probe[(Int, String)]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zip = b.add(Zip[Int, String]())
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zip = b.add(Zip[Int, String]())
- Source.fromPublisher(upstream1) ~> zip.in0
- Source.fromPublisher(upstream2) ~> zip.in1
- zip.out ~> out
+ Source.fromPublisher(upstream1) ~> zip.in0
+ Source.fromPublisher(upstream2) ~> zip.in1
+ zip.out ~> out
- ClosedShape
+ ClosedShape
}).run()
downstream.request(1)
@@ -110,15 +108,14 @@ class GraphZipSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[String]()
val downstream = TestSubscriber.probe[(Int, String)]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zip = b.add(Zip[Int, String]())
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zip = b.add(Zip[Int, String]())
- Source.fromPublisher(upstream1) ~> zip.in0
- Source.fromPublisher(upstream2) ~> zip.in1
- zip.out ~> out
+ Source.fromPublisher(upstream1) ~> zip.in0
+ Source.fromPublisher(upstream2) ~> zip.in1
+ zip.out ~> out
- ClosedShape
+ ClosedShape
}).run()
upstream1.sendNext(1)
@@ -136,15 +133,14 @@ class GraphZipSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[String]()
val downstream = TestSubscriber.probe[(Int, String)]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zip = b.add(Zip[Int, String]())
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zip = b.add(Zip[Int, String]())
- Source.fromPublisher(upstream1) ~> zip.in0
- Source.fromPublisher(upstream2) ~> zip.in1
- zip.out ~> out
+ Source.fromPublisher(upstream1) ~> zip.in0
+ Source.fromPublisher(upstream2) ~> zip.in1
+ zip.out ~> out
- ClosedShape
+ ClosedShape
}).run()
upstream1.sendNext(1)
@@ -163,15 +159,14 @@ class GraphZipSpec extends TwoStreamsSetup {
val upstream2 = TestPublisher.probe[String]()
val downstream = TestSubscriber.probe[(Int, String)]()
- RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒
- out ⇒
- val zip = b.add(Zip[Int, String]())
+ RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b ⇒ out ⇒
+ val zip = b.add(Zip[Int, String]())
- Source.fromPublisher(upstream1) ~> zip.in0
- Source.fromPublisher(upstream2) ~> zip.in1
- zip.out ~> out
+ Source.fromPublisher(upstream1) ~> zip.in0
+ Source.fromPublisher(upstream2) ~> zip.in1
+ zip.out ~> out
- ClosedShape
+ ClosedShape
}).run()
downstream.ensureSubscription()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala
index aa2b5cddb3..53e728e67e 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala
@@ -19,16 +19,15 @@ class PublisherSinkSpec extends AkkaSpec {
"be unique when created twice" in assertAllStagesStopped {
- val (pub1, pub2) = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { implicit b ⇒
- (p1, p2) ⇒
- import GraphDSL.Implicits._
+ val (pub1, pub2) = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { implicit b ⇒ (p1, p2) ⇒
+ import GraphDSL.Implicits._
- val bcast = b.add(Broadcast[Int](2))
+ val bcast = b.add(Broadcast[Int](2))
- Source(0 to 5) ~> bcast.in
- bcast.out(0).map(_ * 2) ~> p1.in
- bcast.out(1) ~> p2.in
- ClosedShape
+ Source(0 to 5) ~> bcast.in
+ bcast.out(0).map(_ * 2) ~> p1.in
+ bcast.out(1) ~> p2.in
+ ClosedShape
}).run()
val f1 = Source.fromPublisher(pub1).map(identity).runFold(0)(_ + _)
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala
index 075526a7f5..1025f07ab0 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala
@@ -17,18 +17,16 @@ class ReverseArrowSpec extends AkkaSpec {
"Reverse Arrows in the Graph DSL" must {
"work from Inlets" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- s.in <~ source
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ s.in <~ source
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work from SinkShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- s <~ source
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ s <~ source
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
@@ -66,133 +64,120 @@ class ReverseArrowSpec extends AkkaSpec {
}
"work from FlowShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: FlowShape[Int, Int] = b.add(Flow[Int])
- f <~ source
- f ~> s
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: FlowShape[Int, Int] = b.add(Flow[Int])
+ f <~ source
+ f ~> s
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work from UniformFanInShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2))
- f <~ source
- f <~ Source.empty
- f ~> s
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2))
+ f <~ source
+ f <~ Source.empty
+ f ~> s
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work from UniformFanOutShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2))
- f <~ source
- f ~> Sink.ignore
- f ~> s
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2))
+ f <~ source
+ f ~> Sink.ignore
+ f ~> s
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work towards Outlets" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val o: Outlet[Int] = b.add(source).out
- s <~ o
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val o: Outlet[Int] = b.add(source).out
+ s <~ o
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work towards SourceShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val o: SourceShape[Int] = b.add(source)
- s <~ o
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val o: SourceShape[Int] = b.add(source)
+ s <~ o
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work towards Source" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- s <~ source
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ s <~ source
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work towards FlowShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: FlowShape[Int, Int] = b.add(Flow[Int])
- s <~ f
- source ~> f
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: FlowShape[Int, Int] = b.add(Flow[Int])
+ s <~ f
+ source ~> f
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work towards UniformFanInShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2))
- s <~ f
- Source.empty ~> f
- source ~> f
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2))
+ s <~ f
+ Source.empty ~> f
+ source ~> f
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"fail towards already full UniformFanInShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2))
- val src = b.add(source)
- Source.empty ~> f
- src ~> f
- (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("no more inlets free")
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2))
+ val src = b.add(source)
+ Source.empty ~> f
+ src ~> f
+ (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("no more inlets free")
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work towards UniformFanOutShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2))
- s <~ f
- Sink.ignore <~ f
- source ~> f
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2))
+ s <~ f
+ Sink.ignore <~ f
+ source ~> f
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"fail towards already full UniformFanOutShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2))
- val sink2: SinkShape[Int] = b.add(Sink.ignore)
- val src = b.add(source)
- src ~> f
- sink2 <~ f
- (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("already connected")
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2))
+ val sink2: SinkShape[Int] = b.add(Sink.ignore)
+ val src = b.add(source)
+ src ~> f
+ sink2 <~ f
+ (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("already connected")
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work across a Flow" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- s <~ Flow[Int] <~ source
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ s <~ Flow[Int] <~ source
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
"work across a FlowShape" in {
- Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒
- s ⇒
- s <~ b.add(Flow[Int]) <~ source
- ClosedShape
+ Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b ⇒ s ⇒
+ s <~ b.add(Flow[Int]) <~ source
+ ClosedShape
}).run(), 1.second) should ===(Seq(1, 2, 3))
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala
index 6bfa3d83c3..5b1458eb40 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala
@@ -23,7 +23,7 @@ class SinkForeachParallelSpec extends AkkaSpec {
implicit val ec = system.dispatcher
val probe = TestProbe()
- val latch = (1 to 4).map(_ -> TestLatch(1)).toMap
+ val latch = (1 to 4).map(_ → TestLatch(1)).toMap
val p = Source(1 to 4).runWith(Sink.foreachParallel(4)((n: Int) ⇒ {
Await.ready(latch(n), 5.seconds)
probe.ref ! n
@@ -48,7 +48,7 @@ class SinkForeachParallelSpec extends AkkaSpec {
implicit val ec = system.dispatcher
val probe = TestProbe()
- val latch = (1 to 5).map(_ -> TestLatch()).toMap
+ val latch = (1 to 5).map(_ → TestLatch()).toMap
val p = Source(1 to 5).runWith(Sink.foreachParallel(4)((n: Int) ⇒ {
probe.ref ! n
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala
index 2e67760b12..4a1b9e492c 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala
@@ -42,12 +42,11 @@ class SinkSpec extends AkkaSpec with DefaultTimeout with ScalaFutures {
"be composable with importing 1 module" in {
val probes = Array.fill(3)(TestSubscriber.manualProbe[Int])
- val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0))) { implicit b ⇒
- s0 ⇒
- val bcast = b.add(Broadcast[Int](3))
- bcast.out(0) ~> Flow[Int].filter(_ == 0) ~> s0.in
- for (i ← 1 to 2) bcast.out(i).filter(_ == i) ~> Sink.fromSubscriber(probes(i))
- SinkShape(bcast.in)
+ val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0))) { implicit b ⇒ s0 ⇒
+ val bcast = b.add(Broadcast[Int](3))
+ bcast.out(0) ~> Flow[Int].filter(_ == 0) ~> s0.in
+ for (i ← 1 to 2) bcast.out(i).filter(_ == i) ~> Sink.fromSubscriber(probes(i))
+ SinkShape(bcast.in)
})
Source(List(0, 1, 2)).runWith(sink)
@@ -59,13 +58,12 @@ class SinkSpec extends AkkaSpec with DefaultTimeout with ScalaFutures {
"be composable with importing 2 modules" in {
val probes = Array.fill(3)(TestSubscriber.manualProbe[Int])
- val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)))(List(_, _)) { implicit b ⇒
- (s0, s1) ⇒
- val bcast = b.add(Broadcast[Int](3))
- bcast.out(0).filter(_ == 0) ~> s0.in
- bcast.out(1).filter(_ == 1) ~> s1.in
- bcast.out(2).filter(_ == 2) ~> Sink.fromSubscriber(probes(2))
- SinkShape(bcast.in)
+ val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)))(List(_, _)) { implicit b ⇒ (s0, s1) ⇒
+ val bcast = b.add(Broadcast[Int](3))
+ bcast.out(0).filter(_ == 0) ~> s0.in
+ bcast.out(1).filter(_ == 1) ~> s1.in
+ bcast.out(2).filter(_ == 2) ~> Sink.fromSubscriber(probes(2))
+ SinkShape(bcast.in)
})
Source(List(0, 1, 2)).runWith(sink)
@@ -77,13 +75,12 @@ class SinkSpec extends AkkaSpec with DefaultTimeout with ScalaFutures {
"be composable with importing 3 modules" in {
val probes = Array.fill(3)(TestSubscriber.manualProbe[Int])
- val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)), Sink.fromSubscriber(probes(2)))(List(_, _, _)) { implicit b ⇒
- (s0, s1, s2) ⇒
- val bcast = b.add(Broadcast[Int](3))
- bcast.out(0).filter(_ == 0) ~> s0.in
- bcast.out(1).filter(_ == 1) ~> s1.in
- bcast.out(2).filter(_ == 2) ~> s2.in
- SinkShape(bcast.in)
+ val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)), Sink.fromSubscriber(probes(2)))(List(_, _, _)) { implicit b ⇒ (s0, s1, s2) ⇒
+ val bcast = b.add(Broadcast[Int](3))
+ bcast.out(0).filter(_ == 0) ~> s0.in
+ bcast.out(1).filter(_ == 1) ~> s1.in
+ bcast.out(2).filter(_ == 2) ~> s2.in
+ SinkShape(bcast.in)
})
Source(List(0, 1, 2)).runWith(sink)
@@ -142,10 +139,11 @@ class SinkSpec extends AkkaSpec with DefaultTimeout with ScalaFutures {
"Java collector Sink" must {
import scala.compat.java8.FunctionConverters._
- class TestCollector(_supplier: () ⇒ Supplier[Array[Int]],
- _accumulator: () ⇒ BiConsumer[Array[Int], Int],
- _combiner: () ⇒ BinaryOperator[Array[Int]],
- _finisher: () ⇒ function.Function[Array[Int], Int]) extends Collector[Int, Array[Int], Int] {
+ class TestCollector(
+ _supplier: () ⇒ Supplier[Array[Int]],
+ _accumulator: () ⇒ BiConsumer[Array[Int], Int],
+ _combiner: () ⇒ BinaryOperator[Array[Int]],
+ _finisher: () ⇒ function.Function[Array[Int], Int]) extends Collector[Int, Array[Int], Int] {
override def supplier(): Supplier[Array[Int]] = _supplier()
override def combiner(): BinaryOperator[Array[Int]] = _combiner()
override def finisher(): function.Function[Array[Int], Int] = _finisher()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala
index a784c89d7a..1beda33549 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala
@@ -143,16 +143,15 @@ class SourceSpec extends AkkaSpec with DefaultTimeout {
val source = Source.asSubscriber[Int]
val out = TestSubscriber.manualProbe[Int]
- val s = Source.fromGraph(GraphDSL.create(source, source, source, source, source)(immutable.Seq(_, _, _, _, _)) { implicit b ⇒
- (i0, i1, i2, i3, i4) ⇒
- import GraphDSL.Implicits._
- val m = b.add(Merge[Int](5))
- i0.out ~> m.in(0)
- i1.out ~> m.in(1)
- i2.out ~> m.in(2)
- i3.out ~> m.in(3)
- i4.out ~> m.in(4)
- SourceShape(m.out)
+ val s = Source.fromGraph(GraphDSL.create(source, source, source, source, source)(immutable.Seq(_, _, _, _, _)) { implicit b ⇒ (i0, i1, i2, i3, i4) ⇒
+ import GraphDSL.Implicits._
+ val m = b.add(Merge[Int](5))
+ i0.out ~> m.in(0)
+ i1.out ~> m.in(1)
+ i2.out ~> m.in(2)
+ i3.out ~> m.in(3)
+ i4.out ~> m.in(4)
+ SourceShape(m.out)
}).to(Sink.fromSubscriber(out)).run()
for (i ← 0 to 4) probes(i).subscribe(s(i))
@@ -241,11 +240,11 @@ class SourceSpec extends AkkaSpec with DefaultTimeout {
EventFilter[RuntimeException](message = "expected", occurrences = 1) intercept
whenReady(
Source.unfold((0, 1)) {
- case (a, _) if a > 10000000 ⇒ throw t
- case (a, b) ⇒ Some((b, a + b) → a)
- }.runFold(List.empty[Int]) { case (xs, x) ⇒ x :: xs }.failed) {
- _ should be theSameInstanceAs (t)
- }
+ case (a, _) if a > 10000000 ⇒ throw t
+ case (a, b) ⇒ Some((b, a + b) → a)
+ }.runFold(List.empty[Int]) { case (xs, x) ⇒ x :: xs }.failed) {
+ _ should be theSameInstanceAs (t)
+ }
}
"generate a finite fibonacci sequence asynchronously" in {
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala
index 6ff9e9d9d5..566e036383 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala
@@ -228,7 +228,7 @@ object StageActorRefSpec {
})
}
- logic -> p.future
+ logic → p.future
}
}
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala
index a5dc0ae744..669f8a6e6e 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala
@@ -62,10 +62,11 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
val closePromiseCalled = Promise[Done]()
val resource = new BufferedReader(new FileReader(manyLinesFile))
- val p = Source.unfoldResourceAsync[String, BufferedReader](() ⇒ {
- createPromiseCalled.success(Done)
- createPromise.future
- },
+ val p = Source.unfoldResourceAsync[String, BufferedReader](
+ () ⇒ {
+ createPromiseCalled.success(Done)
+ createPromise.future
+ },
reader ⇒ {
readPromiseCalled.success(Done)
readPromise.future
@@ -107,10 +108,11 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
val closePromiseCalled = Promise[Done]()
val resource = new BufferedReader(new FileReader(manyLinesFile))
- val p = Source.unfoldResourceAsync[String, BufferedReader](() ⇒ {
- createPromiseCalled.success(Done)
- createPromise.future
- },
+ val p = Source.unfoldResourceAsync[String, BufferedReader](
+ () ⇒ {
+ createPromiseCalled.success(Done)
+ createPromise.future
+ },
reader ⇒ {
readPromiseCalled.success(Done)
readPromise.future
@@ -134,7 +136,8 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"continue when Strategy is Resume and exception happened" in assertAllStagesStopped {
- val p = Source.unfoldResourceAsync[String, BufferedReader](open,
+ val p = Source.unfoldResourceAsync[String, BufferedReader](
+ open,
reader ⇒ {
val s = reader.readLine()
if (s != null && s.contains("b")) throw TE("") else Promise.successful(Option(s)).future
@@ -154,7 +157,8 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"close and open stream again when Strategy is Restart" in assertAllStagesStopped {
- val p = Source.unfoldResourceAsync[String, BufferedReader](open,
+ val p = Source.unfoldResourceAsync[String, BufferedReader](
+ open,
reader ⇒ {
val s = reader.readLine()
if (s != null && s.contains("b")) throw TE("") else Promise.successful(Option(s)).future
@@ -175,7 +179,8 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
"work with ByteString as well" in assertAllStagesStopped {
val chunkSize = 50
val buffer = Array.ofDim[Char](chunkSize)
- val p = Source.unfoldResourceAsync[ByteString, Reader](open,
+ val p = Source.unfoldResourceAsync[ByteString, Reader](
+ open,
reader ⇒ {
val p = Promise[Option[ByteString]]
val s = reader.read(buffer)
@@ -210,7 +215,8 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
val sys = ActorSystem("dispatcher-testing", UnboundedMailboxConfig)
val materializer = ActorMaterializer()(sys)
try {
- val p = Source.unfoldResourceAsync[String, BufferedReader](open,
+ val p = Source.unfoldResourceAsync[String, BufferedReader](
+ open,
read, close).runWith(TestSink.probe)(materializer)
materializer.asInstanceOf[ActorMaterializerImpl].supervisor.tell(StreamSupervisor.GetChildren, testActor)
@@ -220,7 +226,8 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"fail when create throws exception" in assertAllStagesStopped {
- val p = Source.unfoldResourceAsync[String, BufferedReader](() ⇒ throw TE(""),
+ val p = Source.unfoldResourceAsync[String, BufferedReader](
+ () ⇒ throw TE(""),
read, close).runWith(Sink.asPublisher(false))
val c = TestSubscriber.manualProbe[String]()
p.subscribe(c)
@@ -230,7 +237,8 @@ class UnfoldResourceAsyncSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"fail when close throws exception" in assertAllStagesStopped {
- val p = Source.unfoldResourceAsync[String, BufferedReader](open,
+ val p = Source.unfoldResourceAsync[String, BufferedReader](
+ open,
read, reader ⇒ throw TE(""))
.runWith(Sink.asPublisher(false))
val c = TestSubscriber.manualProbe[String]()
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala
index 1c4b7ed68e..bfd560f2f1 100644
--- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala
@@ -43,7 +43,8 @@ class UnfoldResourceSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
"Unfold Resource Source" must {
"read contents from a file" in assertAllStagesStopped {
- val p = Source.unfoldResource[String, BufferedReader](() ⇒ new BufferedReader(new FileReader(manyLinesFile)),
+ val p = Source.unfoldResource[String, BufferedReader](
+ () ⇒ new BufferedReader(new FileReader(manyLinesFile)),
reader ⇒ Option(reader.readLine()),
reader ⇒ reader.close())
.runWith(Sink.asPublisher(false))
@@ -69,7 +70,8 @@ class UnfoldResourceSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"continue when Strategy is Resume and exception happened" in assertAllStagesStopped {
- val p = Source.unfoldResource[String, BufferedReader](() ⇒ new BufferedReader(new FileReader(manyLinesFile)),
+ val p = Source.unfoldResource[String, BufferedReader](
+ () ⇒ new BufferedReader(new FileReader(manyLinesFile)),
reader ⇒ {
val s = reader.readLine()
if (s != null && s.contains("b")) throw TE("") else Option(s)
@@ -90,7 +92,8 @@ class UnfoldResourceSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"close and open stream again when Strategy is Restart" in assertAllStagesStopped {
- val p = Source.unfoldResource[String, BufferedReader](() ⇒ new BufferedReader(new FileReader(manyLinesFile)),
+ val p = Source.unfoldResource[String, BufferedReader](
+ () ⇒ new BufferedReader(new FileReader(manyLinesFile)),
reader ⇒ {
val s = reader.readLine()
if (s != null && s.contains("b")) throw TE("") else Option(s)
@@ -112,7 +115,8 @@ class UnfoldResourceSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
"work with ByteString as well" in assertAllStagesStopped {
val chunkSize = 50
val buffer = Array.ofDim[Char](chunkSize)
- val p = Source.unfoldResource[ByteString, Reader](() ⇒ new BufferedReader(new FileReader(manyLinesFile)),
+ val p = Source.unfoldResource[ByteString, Reader](
+ () ⇒ new BufferedReader(new FileReader(manyLinesFile)),
reader ⇒ {
val s = reader.read(buffer)
if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None
@@ -143,7 +147,8 @@ class UnfoldResourceSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
val sys = ActorSystem("dispatcher-testing", UnboundedMailboxConfig)
val materializer = ActorMaterializer()(sys)
try {
- val p = Source.unfoldResource[String, BufferedReader](() ⇒ new BufferedReader(new FileReader(manyLinesFile)),
+ val p = Source.unfoldResource[String, BufferedReader](
+ () ⇒ new BufferedReader(new FileReader(manyLinesFile)),
reader ⇒ Option(reader.readLine()),
reader ⇒ reader.close()).runWith(TestSink.probe)(materializer)
@@ -154,7 +159,8 @@ class UnfoldResourceSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"fail when create throws exception" in assertAllStagesStopped {
- val p = Source.unfoldResource[String, BufferedReader](() ⇒ throw TE(""),
+ val p = Source.unfoldResource[String, BufferedReader](
+ () ⇒ throw TE(""),
reader ⇒ Option(reader.readLine()),
reader ⇒ reader.close())
.runWith(Sink.asPublisher(false))
@@ -166,7 +172,8 @@ class UnfoldResourceSourceSpec extends AkkaSpec(UnboundedMailboxConfig) {
}
"fail when close throws exception" in assertAllStagesStopped {
- val p = Source.unfoldResource[String, BufferedReader](() ⇒ new BufferedReader(new FileReader(manyLinesFile)),
+ val p = Source.unfoldResource[String, BufferedReader](
+ () ⇒ new BufferedReader(new FileReader(manyLinesFile)),
reader ⇒ Option(reader.readLine()),
reader ⇒ throw TE(""))
.runWith(Sink.asPublisher(false))
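
Note: the specs above all exercise the same create/read/close triple that `Source.unfoldResource` takes. For reference, a minimal standalone sketch of the same pattern outside the test harness; the file name and system name below are placeholders, not from this patch:

  import java.io.{ BufferedReader, FileReader }
  import akka.actor.ActorSystem
  import akka.stream.ActorMaterializer
  import akka.stream.scaladsl.{ Sink, Source }

  implicit val system = ActorSystem("unfold-resource-demo") // assumed name
  implicit val mat = ActorMaterializer()

  val allLines = Source.unfoldResource[String, BufferedReader](
    () ⇒ new BufferedReader(new FileReader("lines.txt")), // create: open the resource
    reader ⇒ Option(reader.readLine()),                   // read: returning None completes the stream
    reader ⇒ reader.close())                              // close: runs on completion and on failure
    .runWith(Sink.seq)
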
diff --git a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala
index 4f06227db3..1e7ede7b60 100644
--- a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala
+++ b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala
@@ -202,16 +202,16 @@ object ActorMaterializerSettings {
* Create [[ActorMaterializerSettings]] from individual settings (Scala).
*/
def apply(
- initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int) =
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int) =
new ActorMaterializerSettings(
initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize)
@@ -243,16 +243,16 @@ object ActorMaterializerSettings {
* Create [[ActorMaterializerSettings]] from individual settings (Java).
*/
def create(
- initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int) =
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int) =
new ActorMaterializerSettings(
initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize)
@@ -277,28 +277,29 @@ object ActorMaterializerSettings {
* Please refer to the `withX` methods for descriptions of the individual settings.
*/
final class ActorMaterializerSettings private (
- val initialInputBufferSize: Int,
- val maxInputBufferSize: Int,
- val dispatcher: String,
- val supervisionDecider: Supervision.Decider,
+ val initialInputBufferSize: Int,
+ val maxInputBufferSize: Int,
+ val dispatcher: String,
+ val supervisionDecider: Supervision.Decider,
val subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- val debugLogging: Boolean,
- val outputBurstLimit: Int,
- val fuzzingMode: Boolean,
- val autoFusing: Boolean,
- val maxFixedBufferSize: Int,
- val syncProcessingLimit: Int) {
+ val debugLogging: Boolean,
+ val outputBurstLimit: Int,
+ val fuzzingMode: Boolean,
+ val autoFusing: Boolean,
+ val maxFixedBufferSize: Int,
+ val syncProcessingLimit: Int) {
- def this(initialInputBufferSize: Int,
- maxInputBufferSize: Int,
- dispatcher: String,
- supervisionDecider: Supervision.Decider,
- subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
- debugLogging: Boolean,
- outputBurstLimit: Int,
- fuzzingMode: Boolean,
- autoFusing: Boolean,
- maxFixedBufferSize: Int) {
+ def this(
+ initialInputBufferSize: Int,
+ maxInputBufferSize: Int,
+ dispatcher: String,
+ supervisionDecider: Supervision.Decider,
+ subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings,
+ debugLogging: Boolean,
+ outputBurstLimit: Int,
+ fuzzingMode: Boolean,
+ autoFusing: Boolean,
+ maxFixedBufferSize: Int) {
this(initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, defaultMaxFixedBufferSize)
}
@@ -310,17 +311,17 @@ final class ActorMaterializerSettings private (
require(initialInputBufferSize <= maxInputBufferSize, s"initialInputBufferSize($initialInputBufferSize) must be <= maxInputBufferSize($maxInputBufferSize)")
private def copy(
- initialInputBufferSize: Int = this.initialInputBufferSize,
- maxInputBufferSize: Int = this.maxInputBufferSize,
- dispatcher: String = this.dispatcher,
- supervisionDecider: Supervision.Decider = this.supervisionDecider,
+ initialInputBufferSize: Int = this.initialInputBufferSize,
+ maxInputBufferSize: Int = this.maxInputBufferSize,
+ dispatcher: String = this.dispatcher,
+ supervisionDecider: Supervision.Decider = this.supervisionDecider,
subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings = this.subscriptionTimeoutSettings,
- debugLogging: Boolean = this.debugLogging,
- outputBurstLimit: Int = this.outputBurstLimit,
- fuzzingMode: Boolean = this.fuzzingMode,
- autoFusing: Boolean = this.autoFusing,
- maxFixedBufferSize: Int = this.maxFixedBufferSize,
- syncProcessingLimit: Int = this.syncProcessingLimit) = {
+ debugLogging: Boolean = this.debugLogging,
+ outputBurstLimit: Int = this.outputBurstLimit,
+ fuzzingMode: Boolean = this.fuzzingMode,
+ autoFusing: Boolean = this.autoFusing,
+ maxFixedBufferSize: Int = this.maxFixedBufferSize,
+ syncProcessingLimit: Int = this.syncProcessingLimit) = {
new ActorMaterializerSettings(
initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging,
outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, syncProcessingLimit)
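
Note: in application code these settings are usually derived from configuration and tuned via the `withX` copy methods rather than through the large constructors being realigned here. A sketch, with arbitrary buffer sizes:

  import akka.actor.ActorSystem
  import akka.stream.{ ActorMaterializer, ActorMaterializerSettings }

  implicit val system = ActorSystem("settings-demo") // assumed name
  val settings = ActorMaterializerSettings(system)
    .withInputBuffer(initialSize = 8, maxSize = 32) // must keep initialSize <= maxSize, per the require above
    .withDebugLogging(true)
  val materializer = ActorMaterializer(settings)
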
diff --git a/akka-stream/src/main/scala/akka/stream/Attributes.scala b/akka-stream/src/main/scala/akka/stream/Attributes.scala
index 2a76dcc916..e46a53c356 100644
--- a/akka-stream/src/main/scala/akka/stream/Attributes.scala
+++ b/akka-stream/src/main/scala/akka/stream/Attributes.scala
@@ -74,7 +74,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
* Java API: Get the first (least specific) attribute of a given `Class` or subclass thereof.
*/
def getFirstAttribute[T <: Attribute](c: Class[T]): Optional[T] =
- attributeList.collectFirst { case attr if c.isInstance(attr) => c cast attr }.asJava
+ attributeList.collectFirst { case attr if c.isInstance(attr) ⇒ c cast attr }.asJava
/**
* Scala API: get all attributes of a given type (or subtypes thereof).
@@ -105,7 +105,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
*/
def get[T <: Attribute: ClassTag]: Option[T] = {
val c = classTag[T].runtimeClass.asInstanceOf[Class[T]]
- attributeList.reverseIterator.collectFirst[T] { case attr if c.isInstance(attr) => c.cast(attr) }
+ attributeList.reverseIterator.collectFirst[T] { case attr if c.isInstance(attr) ⇒ c.cast(attr) }
}
/**
@@ -113,7 +113,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
*/
def getFirst[T <: Attribute: ClassTag]: Option[T] = {
val c = classTag[T].runtimeClass.asInstanceOf[Class[T]]
- attributeList.collectFirst { case attr if c.isInstance(attr) => c.cast(attr) }
+ attributeList.collectFirst { case attr if c.isInstance(attr) ⇒ c.cast(attr) }
}
/**
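Note on the two lookups touched here: `getFirst` scans `attributeList` from the head, while `get` walks it with `reverseIterator`, so the most recently appended matching attribute wins. A small sketch, with the ordering inferred from the code above:

  import akka.stream.Attributes
  import akka.stream.Attributes.Name

  val attrs = Attributes.name("outer") and Attributes.name("inner")
  attrs.getFirst[Name] // Some(Name("outer")): first match from the head of the list
  attrs.get[Name]      // Some(Name("inner")): reverseIterator finds the last match
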
diff --git a/akka-stream/src/main/scala/akka/stream/Fusing.scala b/akka-stream/src/main/scala/akka/stream/Fusing.scala
index 6ff1277fd5..b89641eef5 100644
--- a/akka-stream/src/main/scala/akka/stream/Fusing.scala
+++ b/akka-stream/src/main/scala/akka/stream/Fusing.scala
@@ -38,8 +38,9 @@ object Fusing {
* holds more information on the operation structure of the contained stream
* topology for convenient graph traversal.
*/
- case class FusedGraph[+S <: Shape @uncheckedVariance, +M](override val module: FusedModule,
- override val shape: S) extends Graph[S, M] {
+ case class FusedGraph[+S <: Shape @uncheckedVariance, +M](
+ override val module: FusedModule,
+ override val shape: S) extends Graph[S, M] {
// the @uncheckedVariance look like a compiler bug ... why does it work in Graph but not here?
override def withAttributes(attr: Attributes) = copy(module = module.withAttributes(attr))
}
@@ -47,8 +48,8 @@ object Fusing {
object FusedGraph {
def unapply[S <: Shape, M](g: Graph[S, M]): Option[(FusedModule, S)] =
g.module match {
- case f: FusedModule => Some((f, g.shape))
- case _ => None
+ case f: FusedModule ⇒ Some((f, g.shape))
+ case _ ⇒ None
}
}
@@ -59,10 +60,11 @@ object Fusing {
* the wirings in a more accessible form, allowing traversal from port to upstream
* or downstream port and from there to the owning module (or graph vertex).
*/
- final case class StructuralInfo(upstreams: immutable.Map[InPort, OutPort],
- downstreams: immutable.Map[OutPort, InPort],
- inOwners: immutable.Map[InPort, Module],
- outOwners: immutable.Map[OutPort, Module],
- allModules: Set[Module])
+ final case class StructuralInfo(
+ upstreams: immutable.Map[InPort, OutPort],
+ downstreams: immutable.Map[OutPort, InPort],
+ inOwners: immutable.Map[InPort, Module],
+ outOwners: immutable.Map[OutPort, Module],
+ allModules: Set[Module])
}
diff --git a/akka-stream/src/main/scala/akka/stream/Materializer.scala b/akka-stream/src/main/scala/akka/stream/Materializer.scala
index 32d739def7..5b0a6b3729 100644
--- a/akka-stream/src/main/scala/akka/stream/Materializer.scala
+++ b/akka-stream/src/main/scala/akka/stream/Materializer.scala
@@ -78,6 +78,6 @@ private[akka] object NoMaterializer extends Materializer {
* Context parameter to the `create` methods of sources and sinks.
*/
private[akka] case class MaterializationContext(
- materializer: Materializer,
+ materializer: Materializer,
effectiveAttributes: Attributes,
- stageName: String)
+ stageName: String)
diff --git a/akka-stream/src/main/scala/akka/stream/Shape.scala b/akka-stream/src/main/scala/akka/stream/Shape.scala
index 1afe64802f..9c8556900e 100644
--- a/akka-stream/src/main/scala/akka/stream/Shape.scala
+++ b/akka-stream/src/main/scala/akka/stream/Shape.scala
@@ -306,10 +306,11 @@ object SinkShape {
* +------+
* }}}
*/
-final case class BidiShape[-In1, +Out1, -In2, +Out2](in1: Inlet[In1 @uncheckedVariance],
- out1: Outlet[Out1 @uncheckedVariance],
- in2: Inlet[In2 @uncheckedVariance],
- out2: Outlet[Out2 @uncheckedVariance]) extends Shape {
+final case class BidiShape[-In1, +Out1, -In2, +Out2](
+ in1: Inlet[In1 @uncheckedVariance],
+ out1: Outlet[Out1 @uncheckedVariance],
+ in2: Inlet[In2 @uncheckedVariance],
+ out2: Outlet[Out2 @uncheckedVariance]) extends Shape {
//#implementation-details-elided
override val inlets: immutable.Seq[Inlet[_]] = List(in1, in2)
override val outlets: immutable.Seq[Outlet[_]] = List(out1, out2)
@@ -335,10 +336,11 @@ object BidiShape {
BidiShape(top.in, top.out, bottom.in, bottom.out)
/** Java API */
- def of[In1, Out1, In2, Out2](in1: Inlet[In1 @uncheckedVariance],
- out1: Outlet[Out1 @uncheckedVariance],
- in2: Inlet[In2 @uncheckedVariance],
- out2: Outlet[Out2 @uncheckedVariance]): BidiShape[In1, Out1, In2, Out2] =
+ def of[In1, Out1, In2, Out2](
+ in1: Inlet[In1 @uncheckedVariance],
+ out1: Outlet[Out1 @uncheckedVariance],
+ in2: Inlet[In2 @uncheckedVariance],
+ out2: Outlet[Out2 @uncheckedVariance]): BidiShape[In1, Out1, In2, Out2] =
BidiShape(in1, out1, in2, out2)
}
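
Note: the `BidiShape` whose constructor is reformatted above is just four ports. A sketch of building one by hand; the port names are illustrative:

  import akka.stream.{ BidiShape, Inlet, Outlet }
  import akka.util.ByteString

  val wireIn  = Inlet[ByteString]("codec.wireIn")
  val msgOut  = Outlet[String]("codec.msgOut")
  val msgIn   = Inlet[String]("codec.msgIn")
  val wireOut = Outlet[ByteString]("codec.wireOut")

  // inlets == List(wireIn, msgIn) and outlets == List(msgOut, wireOut), per the overrides above
  val shape = BidiShape(wireIn, msgOut, msgIn, wireOut)
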
diff --git a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala
index 944f6c3d35..7f02f4e1c0 100644
--- a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala
+++ b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala
@@ -190,9 +190,9 @@ object TLSProtocol {
*/
case class NegotiateNewSession(
enabledCipherSuites: Option[immutable.Seq[String]],
- enabledProtocols: Option[immutable.Seq[String]],
- clientAuth: Option[TLSClientAuth],
- sslParameters: Option[SSLParameters]) extends SslTlsOutbound {
+ enabledProtocols: Option[immutable.Seq[String]],
+ clientAuth: Option[TLSClientAuth],
+ sslParameters: Option[SSLParameters]) extends SslTlsOutbound {
/**
* Java API: Make a copy of this message with the given `enabledCipherSuites`.
diff --git a/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala b/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala
index ee930b4b24..7eec474a83 100644
--- a/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala
+++ b/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala
@@ -303,7 +303,8 @@ trait ActorPublisher[T] extends Actor {
tryOnComplete(sub)
case Active | Canceled ⇒
tryOnSubscribe(sub, CancelledSubscription)
- tryOnError(sub,
+ tryOnError(
+ sub,
if (subscriber == sub) ReactiveStreamsCompliance.canNotSubscribeTheSameSubscriberMultipleTimesException
else ReactiveStreamsCompliance.canNotSubscribeTheSameSubscriberMultipleTimesException)
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
index 065ec7092a..a8077aa412 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala
@@ -26,12 +26,13 @@ import akka.stream.impl.fusing.GraphInterpreterShell
/**
* INTERNAL API
*/
-private[akka] case class ActorMaterializerImpl(system: ActorSystem,
- override val settings: ActorMaterializerSettings,
- dispatchers: Dispatchers,
- supervisor: ActorRef,
- haveShutDown: AtomicBoolean,
- flowNames: SeqActorName) extends ActorMaterializer {
+private[akka] case class ActorMaterializerImpl(
+ system: ActorSystem,
+ override val settings: ActorMaterializerSettings,
+ dispatchers: Dispatchers,
+ supervisor: ActorRef,
+ haveShutDown: AtomicBoolean,
+ flowNames: SeqActorName) extends ActorMaterializer {
import akka.stream.impl.Stages._
private val _logger = Logging.getLogger(system, this)
override def logger = _logger
@@ -78,8 +79,9 @@ private[akka] case class ActorMaterializerImpl(system: ActorSystem,
override def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat]): Mat =
materialize(_runnableGraph, null)
- private[stream] def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat],
- subflowFuser: GraphInterpreterShell ⇒ ActorRef): Mat = {
+ private[stream] def materialize[Mat](
+ _runnableGraph: Graph[ClosedShape, Mat],
+ subflowFuser: GraphInterpreterShell ⇒ ActorRef): Mat = {
val runnableGraph =
if (settings.autoFusing) Fusing.aggressive(_runnableGraph)
else _runnableGraph
@@ -142,7 +144,8 @@ private[akka] case class ActorMaterializerImpl(system: ActorSystem,
case stage: GraphStageModule ⇒
val graph =
- GraphModule(GraphAssembly(stage.shape.inlets, stage.shape.outlets, stage.stage),
+ GraphModule(
+ GraphAssembly(stage.shape.inlets, stage.shape.outlets, stage.stage),
stage.shape, stage.attributes, Array(stage))
matGraph(graph, effectiveAttributes, matVal)
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala
index 2060317e26..50f90da798 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala
@@ -15,9 +15,9 @@ import akka.stream.stage._
* INTERNAL API
*/
private[akka] class ActorRefBackpressureSinkStage[In](ref: ActorRef, onInitMessage: Any,
- ackMessage: Any,
+ ackMessage: Any,
onCompleteMessage: Any,
- onFailureMessage: (Throwable) ⇒ Any)
+ onFailureMessage: (Throwable) ⇒ Any)
extends GraphStage[SinkShape[In]] {
val in: Inlet[In] = Inlet[In]("ActorRefBackpressureSink.in")
override def initialAttributes = DefaultAttributes.actorRefWithAck
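
Note: this stage sits behind the actor-ack sink. If memory serves, the user-facing constructor on the scaladsl `Sink` object takes the same message parameters; the protocol objects in this sketch are hypothetical, not from this patch:

  import akka.actor.ActorRef
  import akka.stream.scaladsl.Sink

  case object StreamInit // assumed protocol messages
  case object StreamAck
  case object StreamDone

  // The receiving actor must reply with StreamAck after StreamInit and after each element;
  // that reply is what drives the backpressure this stage implements.
  def ackingSink(receiver: ActorRef): Sink[Int, akka.NotUsed] =
    Sink.actorRefWithAck[Int](receiver, StreamInit, StreamAck, StreamDone)
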
diff --git a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala
index 9061de47fd..53cb12edd0 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala
@@ -47,7 +47,7 @@ private[akka] final case class ErrorPublisher(t: Throwable, name: String) extend
*/
private[akka] final case class MaybePublisher[T](
promise: Promise[Option[T]],
- name: String)(implicit ec: ExecutionContext) extends Publisher[T] {
+ name: String)(implicit ec: ExecutionContext) extends Publisher[T] {
import ReactiveStreamsCompliance._
private[this] class MaybeSubscription(subscriber: Subscriber[_ >: T]) extends Subscription {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala
index 3fe69931c0..908dc63246 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala
@@ -7,10 +7,11 @@ import org.reactivestreams.Subscriber
/**
* INTERNAL API
*/
-private[akka] abstract class FanoutOutputs(val maxBufferSize: Int,
- val initialBufferSize: Int,
- self: ActorRef,
- val pump: Pump)
+private[akka] abstract class FanoutOutputs(
+ val maxBufferSize: Int,
+ val initialBufferSize: Int,
+ self: ActorRef,
+ val pump: Pump)
extends DefaultOutputTransferStates
with SubscriberManagement[Any] {
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala
index a073ffc964..478393ac41 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala
@@ -13,12 +13,15 @@ import ResizableMultiReaderRingBuffer._
* Contrary to many other ring buffer implementations this one does not automatically overwrite the oldest
* elements, rather, if full, the buffer tries to grow and rejects further writes if max capacity is reached.
*/
-private[akka] class ResizableMultiReaderRingBuffer[T](initialSize: Int, // constructor param, not field
- maxSize: Int, // constructor param, not field
- val cursors: Cursors) {
- require(Integer.lowestOneBit(maxSize) == maxSize && 0 < maxSize && maxSize <= Int.MaxValue / 2,
+private[akka] class ResizableMultiReaderRingBuffer[T](
+ initialSize: Int, // constructor param, not field
+ maxSize: Int, // constructor param, not field
+ val cursors: Cursors) {
+ require(
+ Integer.lowestOneBit(maxSize) == maxSize && 0 < maxSize && maxSize <= Int.MaxValue / 2,
"maxSize must be a power of 2 that is > 0 and < Int.MaxValue/2")
- require(Integer.lowestOneBit(initialSize) == initialSize && 0 < initialSize && initialSize <= maxSize,
+ require(
+ Integer.lowestOneBit(initialSize) == initialSize && 0 < initialSize && initialSize <= maxSize,
"initialSize must be a power of 2 that is > 0 and <= maxSize")
private[this] val maxSizeBit = Integer.numberOfTrailingZeros(maxSize)
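
Note: the two require clauses reindented above rely on a bit trick: a positive `Int` is a power of two exactly when `Integer.lowestOneBit` returns the number itself. For illustration:

  // The power-of-two test used by both require checks.
  def isPowerOfTwo(n: Int): Boolean = Integer.lowestOneBit(n) == n && n > 0

  isPowerOfTwo(16) // true:  a valid initialSize/maxSize
  isPowerOfTwo(12) // false: rejected with the messages above
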
diff --git a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala
index 3fb9bbe8ab..0c4295627a 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala
@@ -95,7 +95,7 @@ private[akka] class PublisherSink[In](val attributes: Attributes, shape: SinkSha
*/
private[akka] final class FanoutPublisherSink[In](
val attributes: Attributes,
- shape: SinkShape[In])
+ shape: SinkShape[In])
extends SinkModule[In, Publisher[In]](shape) {
override def create(context: MaterializationContext): (Subscriber[In], Publisher[In]) = {
@@ -176,12 +176,13 @@ private[akka] final class ActorSubscriberSink[In](props: Props, val attributes:
*/
private[akka] final class ActorRefSink[In](ref: ActorRef, onCompleteMessage: Any,
val attributes: Attributes,
- shape: SinkShape[In]) extends SinkModule[In, NotUsed](shape) {
+ shape: SinkShape[In]) extends SinkModule[In, NotUsed](shape) {
override def create(context: MaterializationContext) = {
val actorMaterializer = ActorMaterializer.downcast(context.materializer)
val effectiveSettings = actorMaterializer.effectiveSettings(context.effectiveAttributes)
- val subscriberRef = actorMaterializer.actorOf(context,
+ val subscriberRef = actorMaterializer.actorOf(
+ context,
ActorRefSinkActor.props(ref, effectiveSettings.maxInputBufferSize, onCompleteMessage))
(akka.stream.actor.ActorSubscriber[In](subscriberRef), NotUsed)
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/Sources.scala b/akka-stream/src/main/scala/akka/stream/impl/Sources.scala
index b79eab55ab..326d6f283f 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/Sources.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/Sources.scala
@@ -195,9 +195,10 @@ private[akka] final class SourceQueueAdapter[T](delegate: SourceQueueWithComplet
/**
* INTERNAL API
*/
-private[stream] final class UnfoldResourceSource[T, S](create: () ⇒ S,
- readData: (S) ⇒ Option[T],
- close: (S) ⇒ Unit) extends GraphStage[SourceShape[T]] {
+private[stream] final class UnfoldResourceSource[T, S](
+ create: () ⇒ S,
+ readData: (S) ⇒ Option[T],
+ close: (S) ⇒ Unit) extends GraphStage[SourceShape[T]] {
val out = Outlet[T]("UnfoldResourceSource.out")
override val shape = SourceShape(out)
override def initialAttributes: Attributes = DefaultAttributes.unfoldResourceSource
@@ -251,9 +252,10 @@ private[stream] final class UnfoldResourceSource[T, S](create: () ⇒ S,
override def toString = "UnfoldResourceSource"
}
-private[stream] final class UnfoldResourceSourceAsync[T, S](create: () ⇒ Future[S],
- readData: (S) ⇒ Future[Option[T]],
- close: (S) ⇒ Future[Done]) extends GraphStage[SourceShape[T]] {
+private[stream] final class UnfoldResourceSourceAsync[T, S](
+ create: () ⇒ Future[S],
+ readData: (S) ⇒ Future[Option[T]],
+ close: (S) ⇒ Future[Done]) extends GraphStage[SourceShape[T]] {
val out = Outlet[T]("UnfoldResourceSourceAsync.out")
override val shape = SourceShape(out)
override def initialAttributes: Attributes = DefaultAttributes.unfoldResourceSourceAsync
diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala
index e351a38c8b..3d7e0752fd 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala
@@ -204,10 +204,12 @@ object StreamLayout {
final def wire(from: OutPort, to: InPort): Module = {
if (Debug) validate(this)
- require(outPorts(from),
+ require(
+ outPorts(from),
if (downstreams.contains(from)) s"The output port [$from] is already connected"
else s"The output port [$from] is not part of the underlying graph.")
- require(inPorts(to),
+ require(
+ inPorts(to),
if (upstreams.contains(to)) s"The input port [$to] is already connected"
else s"The input port [$to] is not part of the underlying graph.")
@@ -360,7 +362,7 @@ object StreamLayout {
if (IgnorableMatValComp(that))
Ignore
else
- Transform(_ => NotUsed, that.materializedValueComputation)
+ Transform(_ ⇒ NotUsed, that.materializedValueComputation)
CompositeModule(
if (that.isSealed) Set(that) else that.subModules,
@@ -386,9 +388,10 @@ object StreamLayout {
override def materializedValueComputation: MaterializedValueNode = Ignore
}
- final case class CopiedModule(override val shape: Shape,
- override val attributes: Attributes,
- copyOf: Module) extends Module {
+ final case class CopiedModule(
+ override val shape: Shape,
+ override val attributes: Attributes,
+ copyOf: Module) extends Module {
override val subModules: Set[Module] = Set(copyOf)
override def withAttributes(attr: Attributes): Module =
@@ -411,12 +414,12 @@ object StreamLayout {
}
final case class CompositeModule(
- override val subModules: Set[Module],
- override val shape: Shape,
- override val downstreams: Map[OutPort, InPort],
- override val upstreams: Map[InPort, OutPort],
- override val materializedValueComputation: MaterializedValueNode,
- override val attributes: Attributes) extends Module {
+ override val subModules: Set[Module],
+ override val shape: Shape,
+ override val downstreams: Map[OutPort, InPort],
+ override val upstreams: Map[InPort, OutPort],
+ override val materializedValueComputation: MaterializedValueNode,
+ override val attributes: Attributes) extends Module {
override def replaceShape(s: Shape): Module =
if (s != shape) {
@@ -443,13 +446,13 @@ object StreamLayout {
}
final case class FusedModule(
- override val subModules: Set[Module],
- override val shape: Shape,
- override val downstreams: Map[OutPort, InPort],
- override val upstreams: Map[InPort, OutPort],
- override val materializedValueComputation: MaterializedValueNode,
- override val attributes: Attributes,
- info: Fusing.StructuralInfo) extends Module {
+ override val subModules: Set[Module],
+ override val shape: Shape,
+ override val downstreams: Map[OutPort, InPort],
+ override val upstreams: Map[InPort, OutPort],
+ override val materializedValueComputation: MaterializedValueNode,
+ override val attributes: Attributes,
+ info: Fusing.StructuralInfo) extends Module {
override def isFused: Boolean = true
@@ -555,14 +558,14 @@ private[stream] final class VirtualProcessor[T] extends AtomicReference[AnyRef]
override def subscribe(s: Subscriber[_ >: T]): Unit = {
@tailrec def rec(sub: Subscriber[Any]): Unit =
get() match {
- case null => if (!compareAndSet(null, s)) rec(sub)
- case subscription: Subscription =>
+ case null ⇒ if (!compareAndSet(null, s)) rec(sub)
+ case subscription: Subscription ⇒
if (compareAndSet(subscription, Both(sub))) establishSubscription(sub, subscription)
else rec(sub)
- case pub: Publisher[_] =>
+ case pub: Publisher[_] ⇒
if (compareAndSet(pub, Inert)) pub.subscribe(sub)
else rec(sub)
- case _ =>
+ case _ ⇒
rejectAdditionalSubscriber(sub, "VirtualProcessor")
}
@@ -576,19 +579,19 @@ private[stream] final class VirtualProcessor[T] extends AtomicReference[AnyRef]
override final def onSubscribe(s: Subscription): Unit = {
@tailrec def rec(obj: AnyRef): Unit =
get() match {
- case null => if (!compareAndSet(null, obj)) rec(obj)
- case subscriber: Subscriber[_] =>
+ case null ⇒ if (!compareAndSet(null, obj)) rec(obj)
+ case subscriber: Subscriber[_] ⇒
obj match {
- case subscription: Subscription =>
+ case subscription: Subscription ⇒
if (compareAndSet(subscriber, Both.create(subscriber))) establishSubscription(subscriber, subscription)
else rec(obj)
- case pub: Publisher[_] =>
+ case pub: Publisher[_] ⇒
getAndSet(Inert) match {
- case Inert => // nothing to be done
- case _ => pub.subscribe(subscriber.asInstanceOf[Subscriber[Any]])
+ case Inert ⇒ // nothing to be done
+ case _ ⇒ pub.subscribe(subscriber.asInstanceOf[Subscriber[Any]])
}
}
- case _ =>
+ case _ ⇒
// spec violation
tryCancel(s)
}
@@ -604,7 +607,7 @@ private[stream] final class VirtualProcessor[T] extends AtomicReference[AnyRef]
val wrapped = new WrappedSubscription(subscription)
try subscriber.onSubscribe(wrapped)
catch {
- case NonFatal(ex) =>
+ case NonFatal(ex) ⇒
set(Inert)
tryCancel(subscription)
tryOnError(subscriber, ex)
@@ -619,22 +622,22 @@ private[stream] final class VirtualProcessor[T] extends AtomicReference[AnyRef]
*/
@tailrec def rec(ex: Throwable): Unit =
get() match {
- case null =>
+ case null ⇒
if (!compareAndSet(null, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec(ex)
else if (t == null) throw ex
- case s: Subscription =>
+ case s: Subscription ⇒
if (!compareAndSet(s, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec(ex)
else if (t == null) throw ex
- case Both(s) =>
+ case Both(s) ⇒
set(Inert)
try tryOnError(s, ex)
finally if (t == null) throw ex // must throw NPE, rule 2:13
- case s: Subscriber[_] => // spec violation
+ case s: Subscriber[_] ⇒ // spec violation
getAndSet(Inert) match {
- case Inert => // nothing to be done
- case _ => ErrorPublisher(ex, "failed-VirtualProcessor").subscribe(s)
+ case Inert ⇒ // nothing to be done
+ case _ ⇒ ErrorPublisher(ex, "failed-VirtualProcessor").subscribe(s)
}
- case _ => // spec violation or cancellation race, but nothing we can do
+ case _ ⇒ // spec violation or cancellation race, but nothing we can do
}
val ex = if (t == null) exceptionMustNotBeNullException else t
@@ -643,15 +646,15 @@ private[stream] final class VirtualProcessor[T] extends AtomicReference[AnyRef]
@tailrec override final def onComplete(): Unit =
get() match {
- case null => if (!compareAndSet(null, EmptyPublisher)) onComplete()
- case s: Subscription => if (!compareAndSet(s, EmptyPublisher)) onComplete()
- case Both(s) =>
+ case null ⇒ if (!compareAndSet(null, EmptyPublisher)) onComplete()
+ case s: Subscription ⇒ if (!compareAndSet(s, EmptyPublisher)) onComplete()
+ case Both(s) ⇒
set(Inert)
tryOnComplete(s)
- case s: Subscriber[_] => // spec violation
+ case s: Subscriber[_] ⇒ // spec violation
set(Inert)
EmptyPublisher.subscribe(s)
- case _ => // spec violation or cancellation race, but nothing we can do
+ case _ ⇒ // spec violation or cancellation race, but nothing we can do
}
override def onNext(t: T): Unit =
@@ -659,32 +662,32 @@ private[stream] final class VirtualProcessor[T] extends AtomicReference[AnyRef]
val ex = elementMustNotBeNullException
@tailrec def rec(): Unit =
get() match {
- case x @ (null | _: Subscription) => if (!compareAndSet(x, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec()
- case s: Subscriber[_] => try s.onError(ex) catch { case NonFatal(_) => } finally set(Inert)
- case Both(s) => try s.onError(ex) catch { case NonFatal(_) => } finally set(Inert)
- case _ => // spec violation or cancellation race, but nothing we can do
+ case x @ (null | _: Subscription) ⇒ if (!compareAndSet(x, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec()
+ case s: Subscriber[_] ⇒ try s.onError(ex) catch { case NonFatal(_) ⇒ } finally set(Inert)
+ case Both(s) ⇒ try s.onError(ex) catch { case NonFatal(_) ⇒ } finally set(Inert)
+ case _ ⇒ // spec violation or cancellation race, but nothing we can do
}
rec()
throw ex // must throw NPE, rule 2:13
} else {
@tailrec def rec(): Unit =
get() match {
- case Both(s) =>
+ case Both(s) ⇒
try s.onNext(t)
catch {
- case NonFatal(e) =>
+ case NonFatal(e) ⇒
set(Inert)
throw new IllegalStateException("Subscriber threw exception, this is in violation of rule 2:13", e)
}
- case s: Subscriber[_] => // spec violation
+ case s: Subscriber[_] ⇒ // spec violation
val ex = new IllegalStateException(noDemand)
getAndSet(Inert) match {
- case Inert => // nothing to be done
- case _ => ErrorPublisher(ex, "failed-VirtualProcessor").subscribe(s)
+ case Inert ⇒ // nothing to be done
+ case _ ⇒ ErrorPublisher(ex, "failed-VirtualProcessor").subscribe(s)
}
throw ex
- case Inert | _: Publisher[_] => // nothing to be done
- case other =>
+ case Inert | _: Publisher[_] ⇒ // nothing to be done
+ case other ⇒
val pub = ErrorPublisher(new IllegalStateException(noDemand), "failed-VirtualPublisher")
if (!compareAndSet(other, pub)) rec()
else throw pub.t
@@ -699,9 +702,9 @@ private[stream] final class VirtualProcessor[T] extends AtomicReference[AnyRef]
if (n < 1) {
tryCancel(real)
getAndSet(Inert) match {
- case Both(s) => rejectDueToNonPositiveDemand(s)
- case Inert => // another failure has won the race
- case _ => // this cannot possibly happen, but signaling errors is impossible at this point
+ case Both(s) ⇒ rejectDueToNonPositiveDemand(s)
+ case Inert ⇒ // another failure has won the race
+ case _ ⇒ // this cannot possibly happen, but signaling errors is impossible at this point
}
} else real.request(n)
}
@@ -738,12 +741,12 @@ private[impl] class VirtualPublisher[T] extends AtomicReference[AnyRef] with Pub
requireNonNullSubscriber(subscriber)
@tailrec def rec(): Unit = {
get() match {
- case null => if (!compareAndSet(null, subscriber)) rec()
- case pub: Publisher[_] =>
+ case null ⇒ if (!compareAndSet(null, subscriber)) rec()
+ case pub: Publisher[_] ⇒
if (compareAndSet(pub, Inert.subscriber)) {
pub.asInstanceOf[Publisher[T]].subscribe(subscriber)
} else rec()
- case _: Subscriber[_] => rejectAdditionalSubscriber(subscriber, "Sink.asPublisher(fanout = false)")
+ case _: Subscriber[_] ⇒ rejectAdditionalSubscriber(subscriber, "Sink.asPublisher(fanout = false)")
}
}
rec() // return value is boolean only to make the expressions above compile
@@ -751,11 +754,11 @@ private[impl] class VirtualPublisher[T] extends AtomicReference[AnyRef] with Pub
@tailrec final def registerPublisher(pub: Publisher[_]): Unit =
get() match {
- case null => if (!compareAndSet(null, pub)) registerPublisher(pub)
- case sub: Subscriber[r] =>
+ case null ⇒ if (!compareAndSet(null, pub)) registerPublisher(pub)
+ case sub: Subscriber[r] ⇒
set(Inert.subscriber)
pub.asInstanceOf[Publisher[r]].subscribe(sub)
- case _ => throw new IllegalStateException("internal error")
+ case _ ⇒ throw new IllegalStateException("internal error")
}
}
@@ -884,14 +887,14 @@ private[stream] abstract class MaterializerSession(val topLevel: StreamLayout.Mo
exitScope(copied)
case composite @ (_: CompositeModule | _: FusedModule) ⇒
materializedValues.put(composite, materializeComposite(composite, subEffectiveAttributes))
- case EmptyModule => // nothing to do or say
+ case EmptyModule ⇒ // nothing to do or say
}
}
if (MaterializerSession.Debug) {
println(f"resolving module [${System.identityHashCode(module)}%08x] computation ${module.materializedValueComputation}")
println(s" matValSrc = $matValSrc")
- println(s" matVals =\n ${materializedValues.asScala.map(p ⇒ "%08x".format(System.identityHashCode(p._1)) -> p._2).mkString("\n ")}")
+ println(s" matVals =\n ${materializedValues.asScala.map(p ⇒ "%08x".format(System.identityHashCode(p._1)) → p._2).mkString("\n ")}")
}
val ret = resolveMaterialized(module.materializedValueComputation, materializedValues, 2)
@@ -934,11 +937,11 @@ private[stream] abstract class MaterializerSession(val topLevel: StreamLayout.Mo
subscribers.put(in, subscriberOrVirtual)
currentLayout.upstreams.get(in) match {
- case Some(upstream) =>
+ case Some(upstream) ⇒
val publisher = publishers.get(upstream)
if (publisher ne null) doSubscribe(publisher, subscriberOrVirtual)
// Interface (unconnected) ports of the current scope will be wired when exiting the scope (or some parent scope)
- case None =>
+ case None ⇒
}
}
@@ -946,27 +949,28 @@ private[stream] abstract class MaterializerSession(val topLevel: StreamLayout.Mo
publishers.put(out, publisher)
currentLayout.downstreams.get(out) match {
- case Some(downstream) =>
+ case Some(downstream) ⇒
val subscriber = subscribers.get(downstream)
if (subscriber ne null) doSubscribe(publisher, subscriber)
- // Interface (unconnected) ports of the current scope will be wired when exiting the scope
- case None =>
+ // Interface (unconnected) ports of the current scope will be wired when exiting the scope
+ case None ⇒
}
}
private def doSubscribe(publisher: Publisher[_ <: Any], subscriberOrVirtual: AnyRef): Unit =
subscriberOrVirtual match {
- case s: Subscriber[_] => publisher.subscribe(s.asInstanceOf[Subscriber[Any]])
- case v: VirtualPublisher[_] => v.registerPublisher(publisher)
+ case s: Subscriber[_] ⇒ publisher.subscribe(s.asInstanceOf[Subscriber[Any]])
+ case v: VirtualPublisher[_] ⇒ v.registerPublisher(publisher)
}
}
/**
- * INTERNAL API
- */
-private[akka] final case class ProcessorModule[In, Out, Mat](val createProcessor: () ⇒ (Processor[In, Out], Mat),
- attributes: Attributes = DefaultAttributes.processor) extends StreamLayout.AtomicModule {
+ * INTERNAL API
+ */
+private[akka] final case class ProcessorModule[In, Out, Mat](
+ val createProcessor: () ⇒ (Processor[In, Out], Mat),
+ attributes: Attributes = DefaultAttributes.processor) extends StreamLayout.AtomicModule {
val inPort = Inlet[In]("ProcessorModule.in")
val outPort = Outlet[Out]("ProcessorModule.out")
override val shape = new FlowShape(inPort, outPort)
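
Note: the `@tailrec` rec helpers reformatted throughout `VirtualProcessor` above all share one lock-free idiom: read the `AtomicReference` state, attempt a `compareAndSet` transition, and retry on contention. Stripped of the reactive-streams states, the skeleton looks like this generic sketch:

  import java.util.concurrent.atomic.AtomicReference
  import scala.annotation.tailrec

  val state = new AtomicReference[AnyRef](null)

  @tailrec def claim(next: AnyRef): Unit =
    state.get() match {
      case null ⇒ if (!state.compareAndSet(null, next)) claim(next) // lost the race, retry
      case _    ⇒ () // already transitioned; the real code dispatches on the concrete state type here
    }
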
diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala
index da782c555d..7493958db4 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala
@@ -89,7 +89,8 @@ private[akka] trait StreamSubscriptionTimeoutSupport {
}
private def warn(target: Publisher[_], timeout: FiniteDuration): Unit = {
- log.warning("Timed out {} detected (after {} ms)! You should investigate if you either cancel or consume all {} instances",
+ log.warning(
+ "Timed out {} detected (after {} ms)! You should investigate if you either cancel or consume all {} instances",
target, timeout.toMillis, target.getClass.getCanonicalName)
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala
index 4cb2c28f89..f0a52a62a9 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala
@@ -14,9 +14,10 @@ object SubFlowImpl {
}
}
-class SubFlowImpl[In, Out, Mat, F[+_], C](val subFlow: Flow[In, Out, NotUsed],
- mergeBackFunction: SubFlowImpl.MergeBack[In, F],
- finishFunction: Sink[In, NotUsed] ⇒ C)
+class SubFlowImpl[In, Out, Mat, F[+_], C](
+ val subFlow: Flow[In, Out, NotUsed],
+ mergeBackFunction: SubFlowImpl.MergeBack[In, F],
+ finishFunction: Sink[In, NotUsed] ⇒ C)
extends SubFlow[Out, Mat, F, C] {
override def via[T, Mat2](flow: Graph[FlowShape[Out, T], Mat2]): Repr[T] =
diff --git a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala
index 42cb4023f9..54a740cff4 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala
@@ -14,11 +14,12 @@ import scala.concurrent.duration.{ FiniteDuration, _ }
/**
* INTERNAL API
*/
-private[stream] class Throttle[T](cost: Int,
- per: FiniteDuration,
- maximumBurst: Int,
- costCalculation: (T) ⇒ Int,
- mode: ThrottleMode)
+private[stream] class Throttle[T](
+ cost: Int,
+ per: FiniteDuration,
+ maximumBurst: Int,
+ costCalculation: (T) ⇒ Int,
+ mode: ThrottleMode)
extends SimpleLinearGraphStage[T] {
require(cost > 0, "cost must be > 0")
require(per.toNanos > 0, "per time must be > 0")
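
Note: this internal stage backs the `throttle` operator; a usage sketch with arbitrary numbers (`ThrottleMode.Shaping` delays elements to sustain the rate rather than failing the stream):

  import scala.concurrent.duration._
  import akka.stream.ThrottleMode
  import akka.stream.scaladsl.Source

  // At most 10 cost units per second, bursts of up to 20, each element costing 1.
  val throttled = Source(1 to 100).throttle(10, 1.second, 20, _ ⇒ 1, ThrottleMode.Shaping)
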
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala
index 724c1511f6..1258ec103a 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala
@@ -308,13 +308,13 @@ private[stream] object ActorGraphInterpreter {
* INTERNAL API
*/
private[stream] final class GraphInterpreterShell(
- assembly: GraphAssembly,
- inHandlers: Array[InHandler],
+ assembly: GraphAssembly,
+ inHandlers: Array[InHandler],
outHandlers: Array[OutHandler],
- logics: Array[GraphStageLogic],
- shape: Shape,
- settings: ActorMaterializerSettings,
- val mat: ActorMaterializerImpl) {
+ logics: Array[GraphStageLogic],
+ shape: Shape,
+ settings: ActorMaterializerSettings,
+ val mat: ActorMaterializerImpl) {
import ActorGraphInterpreter._
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala
index 273e0cb672..4930267cb0 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala
@@ -30,7 +30,7 @@ private[stream] object Fusing {
def aggressive[S <: Shape, M](g: Graph[S, M]): FusedGraph[S, M] =
g match {
case fg: FusedGraph[_, _] ⇒ fg
- case FusedGraph(module, shape) => FusedGraph(module, shape)
+ case FusedGraph(module, shape) ⇒ FusedGraph(module, shape)
case _ ⇒ doAggressive(g)
}
@@ -160,7 +160,7 @@ private[stream] object Fusing {
}
pos += 1
- case _ => throw new IllegalArgumentException("unexpected module structure")
+ case _ ⇒ throw new IllegalArgumentException("unexpected module structure")
}
val outsB2 = new Array[Outlet[_]](insB2.size)
@@ -186,7 +186,7 @@ private[stream] object Fusing {
}
}
pos += 1
- case _ => throw new IllegalArgumentException("unexpected module structure")
+ case _ ⇒ throw new IllegalArgumentException("unexpected module structure")
}
/*
@@ -217,8 +217,8 @@ private[stream] object Fusing {
// FIXME attributes should contain some naming info and async boundary where needed
val firstModule = group.iterator.next() match {
- case c: CopiedModule => c
- case _ => throw new IllegalArgumentException("unexpected module structure")
+ case c: CopiedModule ⇒ c
+ case _ ⇒ throw new IllegalArgumentException("unexpected module structure")
}
val async = if (isAsync(firstModule)) Attributes(AsyncBoundary) else Attributes.none
val disp = dispatcher(firstModule) match {
@@ -253,11 +253,12 @@ private[stream] object Fusing {
* correspondence is then used during materialization to trigger these sources
* when “their” node has received its value.
*/
- private def descend(m: Module,
- inheritedAttributes: Attributes,
- struct: BuildStructuralInfo,
- openGroup: ju.Set[Module],
- indent: Int): List[(Module, MaterializedValueNode)] = {
+ private def descend(
+ m: Module,
+ inheritedAttributes: Attributes,
+ struct: BuildStructuralInfo,
+ openGroup: ju.Set[Module],
+ indent: Int): List[(Module, MaterializedValueNode)] = {
def log(msg: String): Unit = println(" " * indent + msg)
val async = m match {
case _: GraphStageModule ⇒ m.attributes.contains(AsyncBoundary)
@@ -327,7 +328,7 @@ private[stream] object Fusing {
struct.registerInternals(newShape, indent)
copy
- case _ => throw new IllegalArgumentException("unexpected module structure")
+ case _ ⇒ throw new IllegalArgumentException("unexpected module structure")
}
val newgm = gm.copy(shape = oldShape.copyFromPorts(oldIns.toList, oldOuts.toList), matValIDs = newids)
// make sure to add all the port mappings from old GraphModule Shape to new shape
@@ -336,14 +337,14 @@ private[stream] object Fusing {
var result = List.empty[(Module, MaterializedValueNode)]
var i = 0
while (i < mvids.length) {
- result ::= mvids(i) -> Atomic(newids(i))
+ result ::= mvids(i) → Atomic(newids(i))
i += 1
}
- result ::= m -> Atomic(newgm)
+ result ::= m → Atomic(newgm)
result
case _ ⇒
if (Debug) log(s"atomic module $m")
- List(m -> struct.addModule(m, localGroup, inheritedAttributes, indent))
+ List(m → struct.addModule(m, localGroup, inheritedAttributes, indent))
}
} else {
val attributes = inheritedAttributes and m.attributes
@@ -351,7 +352,7 @@ private[stream] object Fusing {
case CopiedModule(shape, _, copyOf) ⇒
val ret =
descend(copyOf, attributes, struct, localGroup, indent + 1) match {
- case xs @ (_, mat) :: _ ⇒ (m -> mat) :: xs
+ case xs @ (_, mat) :: _ ⇒ (m → mat) :: xs
case _ ⇒ throw new IllegalArgumentException("cannot happen")
}
struct.rewire(copyOf.shape, shape, indent)
@@ -390,7 +391,7 @@ private[stream] object Fusing {
val ms = c.copyOf.asInstanceOf[GraphStageModule].stage.asInstanceOf[MaterializedValueSource[Any]]
val mapped = ms.computation match {
case Atomic(sub) ⇒ subMat(sub)
- case Ignore => Ignore
+ case Ignore ⇒ Ignore
case other ⇒ matNodeMapping.get(other)
}
if (Debug) log(s"materialized value source: ${c.copyOf} -> $mapped")
@@ -400,7 +401,7 @@ private[stream] object Fusing {
struct.replace(c, replacement, localGroup)
}
// the result for each level is the materialized value computation
- List(m -> newMat)
+ List(m → newMat)
}
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala
index 751a9ebd2a..fe6ba47446 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala
@@ -99,12 +99,13 @@ private[akka] object GraphInterpreter {
* corresponding segments of these arrays matches the exact same order of the ports in the [[Shape]].
*
*/
- final class GraphAssembly(val stages: Array[GraphStageWithMaterializedValue[Shape, Any]],
- val originalAttributes: Array[Attributes],
- val ins: Array[Inlet[_]],
- val inOwners: Array[Int],
- val outs: Array[Outlet[_]],
- val outOwners: Array[Int]) {
+ final class GraphAssembly(
+ val stages: Array[GraphStageWithMaterializedValue[Shape, Any]],
+ val originalAttributes: Array[Attributes],
+ val ins: Array[Inlet[_]],
+ val inOwners: Array[Int],
+ val outs: Array[Outlet[_]],
+ val outOwners: Array[Int]) {
require(ins.length == inOwners.length && inOwners.length == outs.length && outs.length == outOwners.length)
def connectionCount: Int = ins.length
@@ -119,10 +120,11 @@ private[akka] object GraphInterpreter {
* - array of the logics
* - materialized value
*/
- def materialize(inheritedAttributes: Attributes,
- copiedModules: Array[Module],
- matVal: ju.Map[Module, Any],
- register: MaterializedValueSource[Any] ⇒ Unit): (Array[InHandler], Array[OutHandler], Array[GraphStageLogic]) = {
+ def materialize(
+ inheritedAttributes: Attributes,
+ copiedModules: Array[Module],
+ matVal: ju.Map[Module, Any],
+ register: MaterializedValueSource[Any] ⇒ Unit): (Array[InHandler], Array[OutHandler], Array[GraphStageLogic]) = {
val logics = Array.ofDim[GraphStageLogic](stages.length)
var i = 0
@@ -208,9 +210,10 @@ private[akka] object GraphInterpreter {
/**
* INTERNAL API
*/
- final def apply(inlets: immutable.Seq[Inlet[_]],
- outlets: immutable.Seq[Outlet[_]],
- stages: GraphStageWithMaterializedValue[Shape, _]*): GraphAssembly = {
+ final def apply(
+ inlets: immutable.Seq[Inlet[_]],
+ outlets: immutable.Seq[Outlet[_]],
+ stages: GraphStageWithMaterializedValue[Shape, _]*): GraphAssembly = {
// add the contents of an iterator to an array starting at idx
@tailrec def add[T](i: Iterator[T], a: Array[T], idx: Int): Array[T] =
if (i.hasNext) {
@@ -345,14 +348,14 @@ private[akka] object GraphInterpreter {
*/
private[stream] final class GraphInterpreter(
private val assembly: GraphInterpreter.GraphAssembly,
- val materializer: Materializer,
- val log: LoggingAdapter,
- val inHandlers: Array[InHandler], // Lookup table for the InHandler of a connection
- val outHandlers: Array[OutHandler], // Lookup table for the outHandler of the connection
- val logics: Array[GraphStageLogic], // Array of stage logics
- val onAsyncInput: (GraphStageLogic, Any, (Any) ⇒ Unit) ⇒ Unit,
- val fuzzingMode: Boolean,
- val context: ActorRef) {
+ val materializer: Materializer,
+ val log: LoggingAdapter,
+ val inHandlers: Array[InHandler], // Lookup table for the InHandler of a connection
+ val outHandlers: Array[OutHandler], // Lookup table for the outHandler of the connection
+ val logics: Array[GraphStageLogic], // Array of stage logics
+ val onAsyncInput: (GraphStageLogic, Any, (Any) ⇒ Unit) ⇒ Unit,
+ val fuzzingMode: Boolean,
+ val context: ActorRef) {
import GraphInterpreter._
// Maintains additional information for events, basically elements in-flight, or failure.
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala
index 88319b8e83..7b53a33510 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala
@@ -25,9 +25,10 @@ import scala.util.Try
/**
* INTERNAL API
*/
-private[akka] final case class GraphStageModule(shape: Shape,
- attributes: Attributes,
- stage: GraphStageWithMaterializedValue[Shape, Any]) extends AtomicModule {
+private[akka] final case class GraphStageModule(
+ shape: Shape,
+ attributes: Attributes,
+ stage: GraphStageWithMaterializedValue[Shape, Any]) extends AtomicModule {
override def carbonCopy: Module = CopiedModule(shape.deepCopy(), Attributes.none, this)
override def replaceShape(s: Shape): Module =
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/IteratorInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/IteratorInterpreter.scala
index 31be808ca3..263cece018 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/IteratorInterpreter.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/IteratorInterpreter.scala
@@ -100,7 +100,7 @@ private[akka] object IteratorInterpreter {
* INTERNAL API
*/
private[akka] class IteratorInterpreter[I, O](
- val input: Iterator[I],
+ val input: Iterator[I],
val stages: Seq[GraphStageWithMaterializedValue[FlowShape[_, _], Any]]) {
import akka.stream.impl.fusing.IteratorInterpreter._
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala
index f90e34e4bb..9e50f57ea2 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala
@@ -540,22 +540,22 @@ private[akka] final case class Buffer[T](size: Int, overflowStrategy: OverflowSt
if (buffer.isFull) buffer.dropHead()
buffer.enqueue(elem)
ctx.pull()
- case DropTail ⇒ (ctx, elem) ⇒
+ case DropTail ⇒ (ctx, elem) ⇒
if (buffer.isFull) buffer.dropTail()
buffer.enqueue(elem)
ctx.pull()
- case DropBuffer ⇒ (ctx, elem) ⇒
+ case DropBuffer ⇒ (ctx, elem) ⇒
if (buffer.isFull) buffer.clear()
buffer.enqueue(elem)
ctx.pull()
- case DropNew ⇒ (ctx, elem) ⇒
+ case DropNew ⇒ (ctx, elem) ⇒
if (!buffer.isFull) buffer.enqueue(elem)
ctx.pull()
- case Backpressure ⇒ (ctx, elem) ⇒
+ case Backpressure ⇒ (ctx, elem) ⇒
buffer.enqueue(elem)
if (buffer.isFull) ctx.holdUpstream()
else ctx.pull()
- case Fail ⇒ (ctx, elem) ⇒
+ case Fail ⇒ (ctx, elem) ⇒
if (buffer.isFull) ctx.fail(new BufferOverflowException(s"Buffer overflow (max capacity was: $size)!"))
else {
buffer.enqueue(elem)
@@ -908,7 +908,7 @@ private[akka] final case class MapAsyncUnordered[In, Out](parallelism: Int, f: I
*/
private[akka] final case class Log[T](name: String, extract: T ⇒ Any,
logAdapter: Option[LoggingAdapter],
- decider: Supervision.Decider) extends PushStage[T, T] {
+ decider: Supervision.Decider) extends PushStage[T, T] {
import Log._
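
Note: each branch of the `Buffer` match above becomes the `(ctx, elem)` handler for one `OverflowStrategy`; from the API side the strategy is chosen when declaring the buffer, e.g.:

  import akka.stream.OverflowStrategy
  import akka.stream.scaladsl.Source

  // Keep at most 100 elements, discarding the oldest when full (the DropHead branch above).
  val buffered = Source(1 to 1000).buffer(100, OverflowStrategy.dropHead)
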
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala
index 45d83b41aa..1a0f75563f 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala
@@ -82,9 +82,10 @@ private[akka] object ByteStringParser {
* @param acceptUpstreamFinish - if true - stream will complete when received `onUpstreamFinish`, if "false"
* - onTruncation will be called
*/
- case class ParseResult[+T](result: Option[T],
- nextStep: ParseStep[T],
- acceptUpstreamFinish: Boolean = true)
+ case class ParseResult[+T](
+ result: Option[T],
+ nextStep: ParseStep[T],
+ acceptUpstreamFinish: Boolean = true)
trait ParseStep[+T] {
/**
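Note: to make the reshaped `ParseResult` concrete: a step reads from the framework's `ByteReader` and returns what (if anything) was produced plus the step to run next. This is internal API, so the sketch below is illustrative only and the step name is invented:

  import akka.stream.impl.io.ByteStringParser.{ ByteReader, ParseResult, ParseStep }
  import akka.util.ByteString

  // Hypothetical step: emit each 4-byte chunk and stay in the same state.
  object FourByteChunks extends ParseStep[ByteString] {
    override def parse(reader: ByteReader): ParseResult[ByteString] =
      ParseResult(Some(reader.take(4)), FourByteChunks) // acceptUpstreamFinish keeps its default of true
  }
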
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala
index f0ac7c9a1e..b953a08cea 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala
@@ -90,9 +90,10 @@ final private[stream] class InputStreamSinkStage(readTimeout: FiniteDuration) ex
* INTERNAL API
* InputStreamAdapter that interacts with InputStreamSinkStage
*/
-private[akka] class InputStreamAdapter(sharedBuffer: BlockingQueue[StreamToAdapterMessage],
- sendToStage: (AdapterToStageMessage) ⇒ Unit,
- readTimeout: FiniteDuration)
+private[akka] class InputStreamAdapter(
+ sharedBuffer: BlockingQueue[StreamToAdapterMessage],
+ sendToStage: (AdapterToStageMessage) ⇒ Unit,
+ readTimeout: FiniteDuration)
extends InputStream {
var isInitialized = false
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala
index 12bab04e08..441444af95 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala
@@ -154,10 +154,11 @@ final private[stream] class OutputStreamSourceStage(writeTimeout: FiniteDuration
}
}
-private[akka] class OutputStreamAdapter(dataQueue: BlockingQueue[ByteString],
- downstreamStatus: AtomicReference[DownstreamStatus],
- sendToStage: (AdapterToStageMessage) ⇒ Future[Unit],
- writeTimeout: FiniteDuration)
+private[akka] class OutputStreamAdapter(
+ dataQueue: BlockingQueue[ByteString],
+ downstreamStatus: AtomicReference[DownstreamStatus],
+ sendToStage: (AdapterToStageMessage) ⇒ Future[Unit],
+ writeTimeout: FiniteDuration)
extends OutputStream {
var isActive = true
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala
index 381b0ec20a..3addd75cd6 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala
@@ -25,14 +25,15 @@ import akka.stream.TLSProtocol._
*/
private[akka] object TLSActor {
- def props(settings: ActorMaterializerSettings,
- sslContext: SSLContext,
- sslConfig: Option[AkkaSSLConfig],
- firstSession: NegotiateNewSession,
- role: TLSRole,
- closing: TLSClosing,
- hostInfo: Option[(String, Int)],
- tracing: Boolean = false): Props =
+ def props(
+ settings: ActorMaterializerSettings,
+ sslContext: SSLContext,
+ sslConfig: Option[AkkaSSLConfig],
+ firstSession: NegotiateNewSession,
+ role: TLSRole,
+ closing: TLSClosing,
+ hostInfo: Option[(String, Int)],
+ tracing: Boolean = false): Props =
Props(new TLSActor(settings, sslContext, sslConfig, firstSession, role, closing, hostInfo, tracing)).withDeploy(Deploy.local)
final val TransportIn = 0
@@ -45,11 +46,12 @@ private[akka] object TLSActor {
/**
* INTERNAL API.
*/
-private[akka] class TLSActor(settings: ActorMaterializerSettings,
- sslContext: SSLContext,
- externalSslConfig: Option[AkkaSSLConfig],
- firstSession: NegotiateNewSession, role: TLSRole, closing: TLSClosing,
- hostInfo: Option[(String, Int)], tracing: Boolean)
+private[akka] class TLSActor(
+ settings: ActorMaterializerSettings,
+ sslContext: SSLContext,
+ externalSslConfig: Option[AkkaSSLConfig],
+ firstSession: NegotiateNewSession, role: TLSRole, closing: TLSClosing,
+ hostInfo: Option[(String, Int)], tracing: Boolean)
extends Actor with ActorLogging with Pump {
import TLSActor._
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala
index 048a541b04..b93b385597 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala
@@ -26,13 +26,14 @@ import scala.concurrent.{ Future, Promise }
/**
* INTERNAL API
*/
-private[stream] class ConnectionSourceStage(val tcpManager: ActorRef,
- val endpoint: InetSocketAddress,
- val backlog: Int,
- val options: immutable.Traversable[SocketOption],
- val halfClose: Boolean,
- val idleTimeout: Duration,
- val bindShutdownTimeout: FiniteDuration)
+private[stream] class ConnectionSourceStage(
+ val tcpManager: ActorRef,
+ val endpoint: InetSocketAddress,
+ val backlog: Int,
+ val options: immutable.Traversable[SocketOption],
+ val halfClose: Boolean,
+ val idleTimeout: Duration,
+ val bindShutdownTimeout: FiniteDuration)
extends GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[StreamTcp.ServerBinding]] {
import ConnectionSourceStage._
@@ -159,10 +160,10 @@ private[stream] object TcpConnectionStage {
def halfClose: Boolean
}
case class Outbound(
- manager: ActorRef,
- connectCmd: Connect,
+ manager: ActorRef,
+ connectCmd: Connect,
localAddressPromise: Promise[InetSocketAddress],
- halfClose: Boolean) extends TcpRole
+ halfClose: Boolean) extends TcpRole
case class Inbound(connection: ActorRef, halfClose: Boolean) extends TcpRole
/*
@@ -272,7 +273,8 @@ private[stream] object TcpConnectionStage {
override def onUpstreamFailure(ex: Throwable): Unit = {
if (connection != null) {
if (interpreter.log.isDebugEnabled) {
- interpreter.log.debug("Aborting tcp connection because of upstream failure: {}\n{}",
+ interpreter.log.debug(
+ "Aborting tcp connection because of upstream failure: {}\n{}",
ex.getMessage,
ex.getStackTrace.mkString("\n"))
}
@@ -317,12 +319,13 @@ private[stream] class IncomingConnectionStage(connection: ActorRef, remoteAddres
/**
* INTERNAL API
*/
-private[stream] class OutgoingConnectionStage(manager: ActorRef,
- remoteAddress: InetSocketAddress,
- localAddress: Option[InetSocketAddress] = None,
- options: immutable.Traversable[SocketOption] = Nil,
- halfClose: Boolean = true,
- connectTimeout: Duration = Duration.Inf)
+private[stream] class OutgoingConnectionStage(
+ manager: ActorRef,
+ remoteAddress: InetSocketAddress,
+ localAddress: Option[InetSocketAddress] = None,
+ options: immutable.Traversable[SocketOption] = Nil,
+ halfClose: Boolean = true,
+ connectTimeout: Duration = Duration.Inf)
extends GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Future[StreamTcp.OutgoingConnection]] {
import TcpConnectionStage._
diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala
index 71e6214ffd..e5b31bd6bf 100644
--- a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala
+++ b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala
@@ -14,10 +14,10 @@ import com.typesafe.sslconfig.akka.AkkaSSLConfig
private[akka] final case class TlsModule(plainIn: Inlet[SslTlsOutbound], plainOut: Outlet[SslTlsInbound],
cipherIn: Inlet[ByteString], cipherOut: Outlet[ByteString],
shape: Shape, attributes: Attributes,
- sslContext: SSLContext,
- sslConfig: Option[AkkaSSLConfig],
+ sslContext: SSLContext,
+ sslConfig: Option[AkkaSSLConfig],
firstSession: NegotiateNewSession,
- role: TLSRole, closing: TLSClosing, hostInfo: Option[(String, Int)]) extends AtomicModule {
+ role: TLSRole, closing: TLSClosing, hostInfo: Option[(String, Int)]) extends AtomicModule {
override def withAttributes(att: Attributes): TlsModule = copy(attributes = att)
override def carbonCopy: TlsModule =
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala
index 716d8b1365..88ba60b061 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala
@@ -46,8 +46,8 @@ object BidiFlow {
*
*/
def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](
- flow1: Graph[FlowShape[I1, O1], M1],
- flow2: Graph[FlowShape[I2, O2], M2],
+ flow1: Graph[FlowShape[I1, O1], M1],
+ flow2: Graph[FlowShape[I2, O2], M2],
combine: function.Function2[M1, M2, M]): BidiFlow[I1, O1, I2, O2, M] = {
new BidiFlow(scaladsl.BidiFlow.fromFlowsMat(flow1, flow2)(combinerToScala(combine)))
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala
index f546754483..5ec142d2bb 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala
@@ -800,27 +800,27 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
new Flow(delegate.recoverWith(pf))
/**
- * RecoverWithRetries allows to switch to alternative Source on flow failure. It will stay in effect after
- * a failure has been recovered up to `attempts` number of times so that each time there is a failure
- * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
- * attempt to recover at all. Passing in -1 will behave exactly the same as `recoverWith`.
- *
- * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
- *
- * '''Emits when''' element is available from the upstream or upstream is failed and element is available
- * from alternative Source
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or upstream failed with exception pf can handle
- *
- * '''Cancels when''' downstream cancels
- *
- * @param attempts Maximum number of retries or -1 to retry indefinitely
- * @param pf Receives the failure cause and returns the new Source to be materialized if any
- * @throws IllegalArgumentException if `attempts` is a negative number other than -1
- */
+ * RecoverWithRetries allows switching to an alternative Source on flow failure. It will stay in effect after
+ * a failure has been recovered up to `attempts` number of times so that each time there is a failure
+ * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
+ * attempt to recover at all. Passing in -1 will behave exactly the same as `recoverWith`.
+ *
+ * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
+ * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ *
+ * '''Emits when''' element is available from the upstream or upstream is failed and element is available
+ * from alternative Source
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or upstream failed with exception pf can handle
+ *
+ * '''Cancels when''' downstream cancels
+ *
+ * @param attempts Maximum number of retries or -1 to retry indefinitely
+ * @param pf Receives the failure cause and returns the new Source to be materialized if any
+ * @throws IllegalArgumentException if `attempts` is a negative number other than -1
+ */
def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): javadsl.Flow[In, T, Mat @uncheckedVariance] =
new Flow(delegate.recoverWithRetries(attempts, pf))
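Note: a minimal scaladsl sketch of the retry semantics (names and values hypothetical):

    val planB = Source(List("five", "six", "seven", "eight"))
    Source(0 to 9)
      .map(n ⇒ if (n < 5) n.toString else throw new RuntimeException("Boom!"))
      .recoverWithRetries(attempts = 1, { case _: RuntimeException ⇒ planB })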
@@ -1364,8 +1364,9 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
*
* @see [[#alsoTo]]
*/
- def alsoToMat[M2, M3](that: Graph[SinkShape[Out], M2],
- matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
+ def alsoToMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] =
new Flow(delegate.alsoToMat(that)(combinerToScala(matF)))
/**
@@ -1453,8 +1454,9 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
*
* @see [[#merge]]
*/
- def mergeMat[T >: Out, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, T, M2] =
+ def mergeMat[T >: Out, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, T, M2] =
mergeMat(that, matF, eagerComplete = false)
/**
@@ -1466,9 +1468,10 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
*
* @see [[#merge]]
*/
- def mergeMat[T >: Out, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2],
- eagerComplete: Boolean): javadsl.Flow[In, T, M2] =
+ def mergeMat[T >: Out, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2],
+ eagerComplete: Boolean): javadsl.Flow[In, T, M2] =
new Flow(delegate.mergeMat(that)(combinerToScala(matF)))
/**
@@ -1527,9 +1530,11 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
*
* @see [[#zip]]
*/
- def zipMat[T, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out @uncheckedVariance Pair T, M2] =
- this.viaMat(Flow.fromGraph(GraphDSL.create(that,
+ def zipMat[T, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out @uncheckedVariance Pair T, M2] =
+ this.viaMat(Flow.fromGraph(GraphDSL.create(
+ that,
new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out @uncheckedVariance Pair T]] {
def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out @uncheckedVariance Pair T] = {
val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T])
@@ -1550,8 +1555,9 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] =
new Flow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1563,9 +1569,10 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
*
* @see [[#zipWith]]
*/
- def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M],
- combine: function.Function2[Out, Out2, Out3],
- matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] =
+ def zipWithMat[Out2, Out3, M, M2](
+ that: Graph[SourceShape[Out2], M],
+ combine: function.Function2[Out, Out2, Out3],
+ matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] =
new Flow(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF)))
/**
@@ -1615,18 +1622,18 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
new Flow(delegate.idleTimeout(timeout))
/**
- * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
- * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
- * so the resolution of the check is one period (equals to timeout value).
- *
- * '''Emits when''' upstream emits an element
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
- *
- * '''Cancels when''' downstream cancels
- */
+ * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
+ * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
+ * so the resolution of the check is one period (equal to the timeout value).
+ *
+ * '''Emits when''' upstream emits an element
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
+ *
+ * '''Cancels when''' downstream cancels
+ */
def backpressureTimeout(timeout: FiniteDuration): javadsl.Flow[In, Out, Mat] =
new Flow(delegate.backpressureTimeout(timeout))
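As a hedged illustration of `backpressureTimeout` (timings hypothetical), the stream below fails with a TimeoutException once downstream demand stays absent for more than a second after an emission:

    import scala.concurrent.duration._
    val guarded = Source.tick(0.seconds, 100.millis, "ping").backpressureTimeout(1.second)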
@@ -1737,11 +1744,11 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
new Flow(delegate.watchTermination()((left, right) ⇒ matF(left, right.toJava)))
/**
- * Materializes to `FlowMonitor[Out]` that allows monitoring of the the current flow. All events are propagated
- * by the monitor unchanged. Note that the monitor inserts a memory barrier every time it processes an
- * event, and may therefor affect performance.
- * The `combine` function is used to combine the `FlowMonitor` with this flow's materialized value.
- */
+ * Materializes to `FlowMonitor[Out]` that allows monitoring of the current flow. All events are propagated
+ * by the monitor unchanged. Note that the monitor inserts a memory barrier every time it processes an
+ * event, and may therefore affect performance.
+ * The `combine` function is used to combine the `FlowMonitor` with this flow's materialized value.
+ */
def monitor[M]()(combine: function.Function2[Mat, FlowMonitor[Out], M]): javadsl.Flow[In, Out, M] =
new Flow(delegate.monitor()(combinerToScala(combine)))
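A sketch of polling the materialized `FlowMonitor` (scaladsl shown for brevity; assumes an implicit Materializer in scope):

    import akka.stream.FlowMonitorState
    val (monitor, done) = Source(1 to 100)
      .monitor()(Keep.right)
      .toMat(Sink.ignore)(Keep.both)
      .run()
    monitor.state match {
      case FlowMonitorState.Received(elem) ⇒ println(s"last element seen: $elem")
      case other                           ⇒ println(other)
    }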
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala
index c3002bfc09..b7b21030b1 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala
@@ -66,9 +66,10 @@ object Framing {
* this Flow will fail the stream. This length *includes* the header (i.e the offset and
* the length of the size field)
*/
- def lengthField(fieldLength: Int,
- fieldOffset: Int,
- maximumFrameLength: Int): Flow[ByteString, ByteString, NotUsed] =
+ def lengthField(
+ fieldLength: Int,
+ fieldOffset: Int,
+ maximumFrameLength: Int): Flow[ByteString, ByteString, NotUsed] =
scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength).asJava
/**
@@ -85,10 +86,11 @@ object Framing {
* the length of the size field)
* @param byteOrder The ''ByteOrder'' to be used when decoding the field
*/
- def lengthField(fieldLength: Int,
- fieldOffset: Int,
- maximumFrameLength: Int,
- byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] =
+ def lengthField(
+ fieldLength: Int,
+ fieldOffset: Int,
+ maximumFrameLength: Int,
+ byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] =
scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength, byteOrder).asJava
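For example (scaladsl, values hypothetical), frames carrying a 4-byte big-endian length header at offset 0, capped at 64 KiB:

    import java.nio.ByteOrder
    val framing = Framing.lengthField(
      fieldLength = 4, fieldOffset = 0,
      maximumFrameLength = 65536, byteOrder = ByteOrder.BIG_ENDIAN)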
/**
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala
index 7a70645f70..a5fbb9f855 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala
@@ -278,7 +278,7 @@ object ZipN {
object ZipWithN {
def create[A, O](zipper: function.Function[java.util.List[A], O], n: Int): Graph[UniformFanInShape[A, O], NotUsed] = {
import scala.collection.JavaConverters._
- scaladsl.ZipWithN[A, O](seq => zipper.apply(seq.asJava))(n)
+ scaladsl.ZipWithN[A, O](seq ⇒ zipper.apply(seq.asJava))(n)
}
}
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala
index 54c94b8215..2362794819 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala
@@ -305,7 +305,7 @@ object Source {
*/
def zipWithN[T, O](zipper: function.Function[java.util.List[T], O], sources: java.util.List[Source[T, _ <: Any]]): Source[O, NotUsed] = {
val seq = if (sources != null) Util.immutableSeq(sources).map(_.asScala) else immutable.Seq()
- new Source(scaladsl.Source.zipWithN[T, O](seq => zipper.apply(seq.asJava))(seq))
+ new Source(scaladsl.Source.zipWithN[T, O](seq ⇒ zipper.apply(seq.asJava))(seq))
}
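A small scaladsl sketch of `zipWithN`, summing element-wise across three sources:

    val summed = Source.zipWithN[Int, Int](_.sum)(
      List(Source(1 to 3), Source(10 to 12), Source(100 to 102)))
    // emits 111, 114, 117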
/**
@@ -341,61 +341,65 @@ object Source {
new Source(scaladsl.Source.queue[T](bufferSize, overflowStrategy).mapMaterializedValue(new SourceQueueAdapter(_)))
/**
- * Start a new `Source` from some resource which can be opened, read and closed.
- * Interaction with resource happens in a blocking way.
- *
- * Example:
- * {{{
- * Source.unfoldResource(
- * () -> new BufferedReader(new FileReader("...")),
- * reader -> reader.readLine(),
- * reader -> reader.close())
- * }}}
- *
- * You can use the supervision strategy to handle exceptions for `read` function. All exceptions thrown by `create`
- * or `close` will fail the stream.
- *
- * `Restart` supervision strategy will close and create blocking IO again. Default strategy is `Stop` which means
- * that stream will be terminated on error in `read` function by default.
- *
- * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
- * set it for a given Source by using [[ActorAttributes]].
- *
- * @param create - function that is called on stream start and creates/opens resource.
- * @param read - function that reads data from opened resource. It is called each time backpressure signal
- * is received. Stream calls close and completes when `read` returns None.
- * @param close - function that closes resource
- */
- def unfoldResource[T, S](create: function.Creator[S],
- read: function.Function[S, Optional[T]],
- close: function.Procedure[S]): javadsl.Source[T, NotUsed] =
- new Source(scaladsl.Source.unfoldResource[T,S](create.create,
+ * Start a new `Source` from some resource which can be opened, read and closed.
+ * Interaction with resource happens in a blocking way.
+ *
+ * Example:
+ * {{{
+ * Source.unfoldResource(
+ * () -> new BufferedReader(new FileReader("...")),
+ * reader -> reader.readLine(),
+ * reader -> reader.close())
+ * }}}
+ *
+ * You can use the supervision strategy to handle exceptions for the `read` function. All exceptions thrown by `create`
+ * or `close` will fail the stream.
+ *
+ * The `Restart` supervision strategy will close and re-create the blocking IO. The default strategy is `Stop`, which means
+ * that the stream will be terminated on an error in the `read` function.
+ *
+ * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
+ * set it for a given Source by using [[ActorAttributes]].
+ *
+ * @param create - function that is called on stream start and creates/opens resource.
+ * @param read - function that reads data from opened resource. It is called each time backpressure signal
+ * is received. Stream calls close and completes when `read` returns None.
+ * @param close - function that closes resource
+ */
+ def unfoldResource[T, S](
+ create: function.Creator[S],
+ read: function.Function[S, Optional[T]],
+ close: function.Procedure[S]): javadsl.Source[T, NotUsed] =
+ new Source(scaladsl.Source.unfoldResource[T, S](
+ create.create,
(s: S) ⇒ read.apply(s).asScala, close.apply))
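A runnable scaladsl sketch (file name hypothetical) of the same pattern:

    import java.io.{ BufferedReader, FileReader }
    val lines = Source.unfoldResource[String, BufferedReader](
      create = () ⇒ new BufferedReader(new FileReader("example.txt")),
      read = reader ⇒ Option(reader.readLine()), // None ends the stream
      close = reader ⇒ reader.close())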
/**
- * Start a new `Source` from some resource which can be opened, read and closed.
- * It's similar to `unfoldResource` but takes functions that return `CopletionStage` instead of plain values.
- *
- * You can use the supervision strategy to handle exceptions for `read` function or failures of produced `Futures`.
- * All exceptions thrown by `create` or `close` as well as fails of returned futures will fail the stream.
- *
- * `Restart` supervision strategy will close and create resource. Default strategy is `Stop` which means
- * that stream will be terminated on error in `read` function (or future) by default.
- *
- * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
- * set it for a given Source by using [[ActorAttributes]].
- *
- * @param create - function that is called on stream start and creates/opens resource.
- * @param read - function that reads data from opened resource. It is called each time backpressure signal
- * is received. Stream calls close and completes when `CompletionStage` from read function returns None.
- * @param close - function that closes resource
- */
- def unfoldResourceAsync[T, S](create: function.Creator[CompletionStage[S]],
- read: function.Function[S, CompletionStage[Optional[T]]],
- close: function.Function[S, CompletionStage[Done]]): javadsl.Source[T, NotUsed] =
- new Source(scaladsl.Source.unfoldResourceAsync[T,S](() ⇒ create.create().toScala,
- (s: S) ⇒ read.apply(s).toScala.map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext),
- (s: S) ⇒ close.apply(s).toScala))
+ * Start a new `Source` from some resource which can be opened, read and closed.
+ * It's similar to `unfoldResource` but takes functions that return `CompletionStage` instead of plain values.
+ *
+ * You can use the supervision strategy to handle exceptions for the `read` function or failures of the produced `Futures`.
+ * All exceptions thrown by `create` or `close`, as well as failures of returned futures, will fail the stream.
+ *
+ * The `Restart` supervision strategy will close and re-create the resource. The default strategy is `Stop`, which means
+ * that the stream will be terminated on an error in the `read` function (or its future).
+ *
+ * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
+ * set it for a given Source by using [[ActorAttributes]].
+ *
+ * @param create - function that is called on stream start and creates/opens resource.
+ * @param read - function that reads data from opened resource. It is called each time backpressure signal
+ * is received. Stream calls close and completes when `CompletionStage` from read function returns None.
+ * @param close - function that closes resource
+ */
+ def unfoldResourceAsync[T, S](
+ create: function.Creator[CompletionStage[S]],
+ read: function.Function[S, CompletionStage[Optional[T]]],
+ close: function.Function[S, CompletionStage[Done]]): javadsl.Source[T, NotUsed] =
+ new Source(scaladsl.Source.unfoldResourceAsync[T, S](
+ () ⇒ create.create().toScala,
+ (s: S) ⇒ read.apply(s).toScala.map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext),
+ (s: S) ⇒ close.apply(s).toScala))
}
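A hedged scaladsl sketch of `unfoldResourceAsync`, with an in-memory iterator standing in for an asynchronous resource such as a database cursor:

    import scala.concurrent.Future
    import akka.Done
    val it = Iterator.range(1, 4)
    val rows = Source.unfoldResourceAsync[Int, Iterator[Int]](
      create = () ⇒ Future.successful(it),
      read = i ⇒ Future.successful(if (i.hasNext) Some(i.next()) else None),
      close = _ ⇒ Future.successful(Done))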
/**
@@ -577,8 +581,9 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
*
* @see [[#concat]].
*/
- def concatMat[T >: Out, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
+ def concatMat[T >: Out, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
new Source(delegate.concatMat(that)(combinerToScala(matF)))
/**
@@ -617,8 +622,9 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
*
* @see [[#prepend]].
*/
- def prependMat[T >: Out, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
+ def prependMat[T >: Out, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
new Source(delegate.prependMat(that)(combinerToScala(matF)))
/**
@@ -645,8 +651,9 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
*
* @see [[#alsoTo]]
*/
- def alsoToMat[M2, M3](that: Graph[SinkShape[Out], M2],
- matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
+ def alsoToMat[M2, M3](
+ that: Graph[SinkShape[Out], M2],
+ matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
new Source(delegate.alsoToMat(that)(combinerToScala(matF)))
/**
@@ -718,8 +725,9 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
*
* @see [[#merge]].
*/
- def mergeMat[T >: Out, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
+ def mergeMat[T >: Out, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
new Source(delegate.mergeMat(that)(combinerToScala(matF)))
/**
@@ -778,8 +786,9 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
*
* @see [[#zip]].
*/
- def zipMat[T, M, M2](that: Graph[SourceShape[T], M],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] =
+ def zipMat[T, M, M2](
+ that: Graph[SourceShape[T], M],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] =
this.viaMat(Flow.create[Out].zipMat(that, Keep.right[NotUsed, M]), matF)
/**
@@ -794,8 +803,9 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] =
new Source(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -807,9 +817,10 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
*
* @see [[#zipWith]].
*/
- def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M],
- combine: function.Function2[Out, Out2, Out3],
- matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] =
+ def zipWithMat[Out2, Out3, M, M2](
+ that: Graph[SourceShape[Out2], M],
+ combine: function.Function2[Out, Out2, Out3],
+ matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] =
new Source(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF)))
/**
@@ -877,27 +888,26 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
def recoverWith[T >: Out](pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): Source[T, Mat @uncheckedVariance] =
new Source(delegate.recoverWith(pf))
-
/**
- * RecoverWithRetries allows to switch to alternative Source on flow failure. It will stay in effect after
- * a failure has been recovered up to `attempts` number of times so that each time there is a failure
- * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
- * attempt to recover at all. Passing in a negative number will behave exactly the same as `recoverWith`.
- *
- * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
- *
- * '''Emits when''' element is available from the upstream or upstream is failed and element is available
- * from alternative Source
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or upstream failed with exception pf can handle
- *
- * '''Cancels when''' downstream cancels
- *
- */
- def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): Source[T, Mat @uncheckedVariance] =
+ * RecoverWithRetries allows switching to an alternative Source on flow failure. It will stay in effect after
+ * a failure has been recovered up to `attempts` number of times so that each time there is a failure
+ * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
+ * attempt to recover at all. Passing in a negative number will behave exactly the same as `recoverWith`.
+ *
+ * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
+ * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ *
+ * '''Emits when''' element is available from the upstream or upstream is failed and element is available
+ * from alternative Source
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or upstream failed with exception pf can handle
+ *
+ * '''Cancels when''' downstream cancels
+ *
+ */
+ def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): Source[T, Mat @uncheckedVariance] =
new Source(delegate.recoverWithRetries(attempts, pf))
/**
* Transform each input element into an `Iterable` of output elements that is
@@ -1827,18 +1837,18 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
new Source(delegate.idleTimeout(timeout))
/**
- * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
- * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
- * so the resolution of the check is one period (equals to timeout value).
- *
- * '''Emits when''' upstream emits an element
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
- *
- * '''Cancels when''' downstream cancels
- */
+ * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
+ * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
+ * so the resolution of the check is one period (equal to the timeout value).
+ *
+ * '''Emits when''' upstream emits an element
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
+ *
+ * '''Cancels when''' downstream cancels
+ */
def backpressureTimeout(timeout: FiniteDuration): javadsl.Source[Out, Mat] =
new Source(delegate.backpressureTimeout(timeout))
@@ -1949,11 +1959,11 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
new Source(delegate.watchTermination()((left, right) ⇒ matF(left, right.toJava)))
/**
- * Materializes to `FlowMonitor[Out]` that allows monitoring of the the current flow. All events are propagated
- * by the monitor unchanged. Note that the monitor inserts a memory barrier every time it processes an
- * event, and may therefor affect performance.
- * The `combine` function is used to combine the `FlowMonitor` with this flow's materialized value.
- */
+ * Materializes to `FlowMonitor[Out]` that allows monitoring of the current flow. All events are propagated
+ * by the monitor unchanged. Note that the monitor inserts a memory barrier every time it processes an
+ * event, and may therefore affect performance.
+ * The `combine` function is used to combine the `FlowMonitor` with this flow's materialized value.
+ */
def monitor[M]()(combine: function.Function2[Mat, FlowMonitor[Out], M]): javadsl.Source[Out, M] =
new Source(delegate.monitor()(combinerToScala(combine)))
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala
index 9b4ecb7de1..71492c1399 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala
@@ -150,57 +150,56 @@ object StreamConverters {
def asOutputStream(): javadsl.Source[ByteString, OutputStream] =
new Source(scaladsl.StreamConverters.asOutputStream())
-
/**
- * Creates a sink which materializes into Java 8 ``Stream`` that can be run to trigger demand through the sink.
- * Elements emitted through the stream will be available for reading through the Java 8 ``Stream``.
- *
- * The Java 8 ``Stream`` will be ended when the stream flowing into this ``Sink`` completes, and closing the Java
- * ``Stream`` will cancel the inflow of this ``Sink``.
- *
- * Java 8 ``Stream`` throws exception in case reactive stream failed.
- *
- * Be aware that Java ``Stream`` blocks current thread while waiting on next element from downstream.
- * As it is interacting wit blocking API the implementation runs on a separate dispatcher
- * configured through the ``akka.stream.blocking-io-dispatcher``.
- */
+ * Creates a sink which materializes into Java 8 ``Stream`` that can be run to trigger demand through the sink.
+ * Elements emitted through the stream will be available for reading through the Java 8 ``Stream``.
+ *
+ * The Java 8 ``Stream`` will be ended when the stream flowing into this ``Sink`` completes, and closing the Java
+ * ``Stream`` will cancel the inflow of this ``Sink``.
+ *
+ * The Java 8 ``Stream`` throws an exception in case the reactive stream failed.
+ *
+ * Be aware that the Java ``Stream`` blocks the current thread while waiting on the next element from downstream.
+ * As it is interacting with a blocking API the implementation runs on a separate dispatcher
+ * configured through the ``akka.stream.blocking-io-dispatcher``.
+ */
def asJavaStream[T](): Sink[T, java.util.stream.Stream[T]] = new Sink(scaladsl.StreamConverters.asJavaStream())
/**
- * Creates a source that wraps a Java 8 ``Stream``. ``Source`` uses a stream iterator to get all its
- * elements and send them downstream on demand.
- *
- * Example usage: `Source.fromJavaStream(() -> IntStream.rangeClosed(1, 10))`
- *
- * You can use [[Source.async]] to create asynchronous boundaries between synchronous java stream
- * and the rest of flow.
- */
+ * Creates a source that wraps a Java 8 ``Stream``. ``Source`` uses a stream iterator to get all its
+ * elements and send them downstream on demand.
+ *
+ * Example usage: `Source.fromJavaStream(() -> IntStream.rangeClosed(1, 10))`
+ *
+ * You can use [[Source.async]] to create asynchronous boundaries between synchronous java stream
+ * and the rest of flow.
+ */
def fromJavaStream[O, S <: java.util.stream.BaseStream[O, S]](stream: function.Creator[java.util.stream.BaseStream[O, S]]): javadsl.Source[O, NotUsed] =
new Source(scaladsl.StreamConverters.fromJavaStream(stream.create))
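For instance (scaladsl), wrapping a finite Java 8 ``Stream``; the creator is invoked anew on every materialization:

    val letters = StreamConverters.fromJavaStream(() ⇒ java.util.stream.Stream.of("a", "b", "c"))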
/**
- * Creates a sink which materializes into a ``CompletionStage`` which will be completed with a result of the Java 8 ``Collector``
- * transformation and reduction operations. This allows usage of Java 8 streams transformations for reactive streams.
- * The Collector`` will trigger demand downstream. Elements emitted through the stream will be accumulated into a mutable
- * result container, optionally transformed into a final representation after all input elements have been processed.
- * The ``Collector`` can also do reduction at the end. Reduction processing is performed sequentially
- *
- * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
- * to handle multiple invocations.
- */
+ * Creates a sink which materializes into a ``CompletionStage`` which will be completed with a result of the Java 8 ``Collector``
+ * transformation and reduction operations. This allows usage of Java 8 streams transformations for reactive streams.
+ * The ``Collector`` will trigger demand downstream. Elements emitted through the stream will be accumulated into a mutable
+ * result container, optionally transformed into a final representation after all input elements have been processed.
+ * The ``Collector`` can also do reduction at the end. Reduction processing is performed sequentially.
+ *
+ * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
+ * to handle multiple invocations.
+ */
def javaCollector[T, R](collector: function.Creator[Collector[T, _ <: Any, R]]): Sink[T, CompletionStage[R]] =
new Sink(scaladsl.StreamConverters.javaCollector[T, R](() ⇒ collector.create()).toCompletionStage())
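A sketch (scaladsl) collecting elements into a ``java.util.List`` via a standard ``Collector``:

    import java.util.stream.Collectors
    val toListSink = StreamConverters.javaCollector[String, java.util.List[String]](
      () ⇒ Collectors.toList[String]())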
/**
- * Creates a sink which materializes into a ``CompletionStage`` which will be completed with a result of the Java 8 ``Collector``
- * transformation and reduction operations. This allows usage of Java 8 streams transformations for reactive streams.
- * The ``Collector`` will trigger demand downstream. Elements emitted through the stream will be accumulated into a mutable
- * result container, optionally transformed into a final representation after all input elements have been processed.
- * ``Collector`` can also do reduction at the end. Reduction processing is performed in parallel based on graph ``Balance``.
- *
- * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
- * to handle multiple invocations.
- */
+ * Creates a sink which materializes into a ``CompletionStage`` which will be completed with a result of the Java 8 ``Collector``
+ * transformation and reduction operations. This allows usage of Java 8 streams transformations for reactive streams.
+ * The ``Collector`` will trigger demand downstream. Elements emitted through the stream will be accumulated into a mutable
+ * result container, optionally transformed into a final representation after all input elements have been processed.
+ * The ``Collector`` can also do reduction at the end. Reduction processing is performed in parallel, using the ``Balance`` graph stage.
+ *
+ * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
+ * to handle multiple invocations.
+ */
def javaCollectorParallelUnordered[T, R](parallelism: Int)(collector: function.Creator[Collector[T, _ <: Any, R]]): Sink[T, CompletionStage[R]] =
new Sink(scaladsl.StreamConverters.javaCollectorParallelUnordered[T, R](parallelism)(() ⇒ collector.create()).toCompletionStage())
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala
index 444a6c5a04..1e8e562253 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala
@@ -634,29 +634,27 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo
new SubFlow(delegate.recoverWith(pf))
/**
- * RecoverWithRetries allows to switch to alternative Source on flow failure. It will stay in effect after
- * a failure has been recovered up to `attempts` number of times so that each time there is a failure
- * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
- * attempt to recover at all. Passing in a negative number will behave exactly the same as `recoverWith`.
- *
- * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
- *
- * '''Emits when''' element is available from the upstream or upstream is failed and element is available
- * from alternative Source
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or upstream failed with exception pf can handle
- *
- * '''Cancels when''' downstream cancels
- *
- */
+ * RecoverWithRetries allows switching to an alternative Source on flow failure. It will stay in effect after
+ * a failure has been recovered up to `attempts` number of times so that each time there is a failure
+ * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
+ * attempt to recover at all. Passing in a negative number will behave exactly the same as `recoverWith`.
+ *
+ * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
+ * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ *
+ * '''Emits when''' element is available from the upstream or upstream is failed and element is available
+ * from alternative Source
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or upstream failed with exception pf can handle
+ *
+ * '''Cancels when''' downstream cancels
+ *
+ */
def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): SubFlow[In, T, Mat @uncheckedVariance] =
new SubFlow(delegate.recoverWithRetries(attempts, pf))
-
-
/**
* Terminate processing (and cancel the upstream publisher) after the given
* number of elements. Due to input buffering some elements may have been
@@ -1065,8 +1063,9 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] =
new SubFlow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1116,18 +1115,18 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo
new SubFlow(delegate.idleTimeout(timeout))
/**
- * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
- * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
- * so the resolution of the check is one period (equals to timeout value).
- *
- * '''Emits when''' upstream emits an element
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
- *
- * '''Cancels when''' downstream cancels
- */
+ * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
+ * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
+ * so the resolution of the check is one period (equal to the timeout value).
+ *
+ * '''Emits when''' upstream emits an element
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
+ *
+ * '''Cancels when''' downstream cancels
+ */
def backpressureTimeout(timeout: FiniteDuration): SubFlow[In, Out, Mat] =
new SubFlow(delegate.backpressureTimeout(timeout))
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala
index 5eaf3958b1..da0061adc9 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala
@@ -632,24 +632,24 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source
new SubSource(delegate.recoverWith(pf))
/**
- * RecoverWithRetries allows to switch to alternative Source on flow failure. It will stay in effect after
- * a failure has been recovered up to `attempts` number of times so that each time there is a failure
- * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
- * attempt to recover at all. Passing in a negative number will behave exactly the same as `recoverWith`.
- *
- * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
- *
- * '''Emits when''' element is available from the upstream or upstream is failed and element is available
- * from alternative Source
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or upstream failed with exception pf can handle
- *
- * '''Cancels when''' downstream cancels
- *
- */
+ * RecoverWithRetries allows switching to an alternative Source on flow failure. It will stay in effect after
+ * a failure has been recovered up to `attempts` number of times so that each time there is a failure
+ * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
+ * attempt to recover at all. Passing in a negative number will behave exactly the same as `recoverWith`.
+ *
+ * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
+ * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ *
+ * '''Emits when''' element is available from the upstream or upstream is failed and element is available
+ * from alternative Source
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or upstream failed with exception pf can handle
+ *
+ * '''Cancels when''' downstream cancels
+ *
+ */
def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): SubSource[T, Mat @uncheckedVariance] =
new SubSource(delegate.recoverWithRetries(attempts, pf))
@@ -1062,8 +1062,9 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source
*
* '''Cancels when''' downstream cancels
*/
- def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _],
- combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] =
+ def zipWith[Out2, Out3](
+ that: Graph[SourceShape[Out2], _],
+ combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] =
new SubSource(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))
/**
@@ -1113,18 +1114,18 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source
new SubSource(delegate.idleTimeout(timeout))
/**
- * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
- * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
- * so the resolution of the check is one period (equals to timeout value).
- *
- * '''Emits when''' upstream emits an element
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
- *
- * '''Cancels when''' downstream cancels
- */
+ * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
+ * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
+ * so the resolution of the check is one period (equal to the timeout value).
+ *
+ * '''Emits when''' upstream emits an element
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
+ *
+ * '''Cancels when''' downstream cancels
+ */
def backpressureTimeout(timeout: FiniteDuration): SubSource[Out, Mat] =
new SubSource(delegate.backpressureTimeout(timeout))
diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala
index d05c4a8da0..fb77dc5cb2 100644
--- a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala
+++ b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala
@@ -120,12 +120,13 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* independently of whether the client is still attempting to write. This setting is recommended
* for servers, and therefore it is the default setting.
*/
- def bind(interface: String,
- port: Int,
- backlog: Int,
- options: JIterable[SocketOption],
- halfClose: Boolean,
- idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] =
+ def bind(
+ interface: String,
+ port: Int,
+ backlog: Int,
+ options: JIterable[SocketOption],
+ halfClose: Boolean,
+ idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] =
Source.fromGraph(delegate.bind(interface, port, backlog, immutableSeq(options), halfClose, idleTimeout)
.map(new IncomingConnection(_))
.mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava))
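A minimal echo-server sketch against this API (scaladsl; address and port hypothetical, implicit ActorSystem and Materializer assumed):

    val binding = Tcp().bind("127.0.0.1", 8888).to(Sink.foreach { conn ⇒
      conn.handleWith(Flow[ByteString]) // echo each received chunk back
    }).run()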
@@ -159,12 +160,13 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* If set to false, the connection will be closed immediately once the client closes its write side,
* independently of whether the server is still attempting to write.
*/
- def outgoingConnection(remoteAddress: InetSocketAddress,
- localAddress: Optional[InetSocketAddress],
- options: JIterable[SocketOption],
- halfClose: Boolean,
- connectTimeout: Duration,
- idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
+ def outgoingConnection(
+ remoteAddress: InetSocketAddress,
+ localAddress: Optional[InetSocketAddress],
+ options: JIterable[SocketOption],
+ halfClose: Boolean,
+ connectTimeout: Duration,
+ idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
Flow.fromGraph(delegate.outgoingConnection(remoteAddress, localAddress.asScala, immutableSeq(options), halfClose, connectTimeout, idleTimeout)
.mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava))
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala
index 7e195c0462..3bc954ebd4 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala
@@ -194,8 +194,7 @@ object BidiFlow {
def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](
flow1: Graph[FlowShape[I1, O1], M1],
flow2: Graph[FlowShape[I2, O2], M2])(combine: (M1, M2) ⇒ M): BidiFlow[I1, O1, I2, O2, M] =
- fromGraph(GraphDSL.create(flow1, flow2)(combine) {
- implicit b ⇒ (f1, f2) ⇒ BidiShape(f1.in, f1.out, f2.in, f2.out)
+ fromGraph(GraphDSL.create(flow1, flow2)(combine) { implicit b ⇒ (f1, f2) ⇒ BidiShape(f1.in, f1.out, f2.in, f2.out)
})
/**
@@ -216,8 +215,9 @@ object BidiFlow {
* }}}
*
*/
- def fromFlows[I1, O1, I2, O2, M1, M2](flow1: Graph[FlowShape[I1, O1], M1],
- flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
+ def fromFlows[I1, O1, I2, O2, M1, M2](
+ flow1: Graph[FlowShape[I1, O1], M1],
+ flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
fromFlowsMat(flow1, flow2)(Keep.none)
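For illustration, a toy codec assembled from two independent halves:

    val codec = BidiFlow.fromFlows(
      Flow[Int].map(i ⇒ ByteString(i.toString)),       // outbound: encode
      Flow[ByteString].map(bs ⇒ bs.utf8String.toInt))  // inbound: decode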
/**
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala
index 1a7bd530cf..12ca3f4e73 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala
@@ -3,7 +3,7 @@
*/
package akka.stream.scaladsl
-import akka.event.{Logging, LoggingAdapter}
+import akka.event.{ Logging, LoggingAdapter }
import akka.stream._
import akka.Done
import akka.stream.impl.Stages.DefaultAttributes
@@ -26,7 +26,7 @@ import akka.NotUsed
* A `Flow` is a set of stream processing steps that has one open input and one open output.
*/
final class Flow[-In, +Out, +Mat](private[stream] override val module: Module)
- extends FlowOpsMat[Out, Mat] with Graph[FlowShape[In, Out], Mat] {
+ extends FlowOpsMat[Out, Mat] with Graph[FlowShape[In, Out], Mat] {
override val shape: FlowShape[In, Out] = module.shape.asInstanceOf[FlowShape[In, Out]]
@@ -49,8 +49,8 @@ final class Flow[-In, +Out, +Mat](private[stream] override val module: Module)
val m = flow.module
val mat =
if (combine == Keep.left) {
- if (IgnorableMatValComp(m)) Ignore else Transform(_ => NotUsed, Atomic(m))
- } else Combine(combine.asInstanceOf[(Any, Any) => Any], Ignore, Atomic(m))
+ if (IgnorableMatValComp(m)) Ignore else Transform(_ ⇒ NotUsed, Atomic(m))
+ } else Combine(combine.asInstanceOf[(Any, Any) ⇒ Any], Ignore, Atomic(m))
new Flow(CompositeModule(Set(m), m.shape, empty, empty, mat, Attributes.none))
} else {
val flowCopy = flow.module.carbonCopy
@@ -434,28 +434,28 @@ trait FlowOps[+Out, +Mat] {
via(new RecoverWith(-1, pf))
/**
- * RecoverWithRetries allows to switch to alternative Source on flow failure. It will stay in effect after
- * a failure has been recovered up to `attempts` number of times so that each time there is a failure
- * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
- * attempt to recover at all. Passing -1 will behave exactly the same as `recoverWith`.
- *
- * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
- *
- * '''Emits when''' element is available from the upstream or upstream is failed and element is available
- * from alternative Source
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or upstream failed with exception pf can handle
- *
- * '''Cancels when''' downstream cancels
- *
- * @param attempts Maximum number of retries or -1 to retry indefinitely
- * @param pf Receives the failure cause and returns the new Source to be materialized if any
- * @throws IllegalArgumentException if `attempts` is a negative number other than -1
- *
- */
+ * RecoverWithRetries allows switching to an alternative Source on flow failure. It will stay in effect after
+ * a failure has been recovered up to `attempts` number of times so that each time there is a failure
+ * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
+ * attempt to recover at all. Passing -1 will behave exactly the same as `recoverWith`.
+ *
+ * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
+ * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ *
+ * '''Emits when''' element is available from the upstream or upstream is failed and element is available
+ * from alternative Source
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or upstream failed with exception pf can handle
+ *
+ * '''Cancels when''' downstream cancels
+ *
+ * @param attempts Maximum number of retries or -1 to retry indefinitely
+ * @param pf Receives the failure cause and returns the new Source to be materialized if any
+ * @throws IllegalArgumentException if `attempts` is a negative number other than -1
+ *
+ */
def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] =
via(new RecoverWith(attempts, pf))
@@ -492,7 +492,7 @@ trait FlowOps[+Out, +Mat] {
* '''Cancels when''' downstream cancels
*
*/
- def mapConcat[T](f: Out ⇒ immutable.Iterable[T]): Repr[T] = statefulMapConcat(() => f)
+ def mapConcat[T](f: Out ⇒ immutable.Iterable[T]): Repr[T] = statefulMapConcat(() ⇒ f)
/**
* Transform each input element into an `Iterable` of output elements that is
@@ -1029,7 +1029,7 @@ trait FlowOps[+Out, +Mat] {
*
* See also [[FlowOps.conflate]], [[FlowOps.limit]], [[FlowOps.limitWeighted]] [[FlowOps.batch]] [[FlowOps.batchWeighted]]
*/
- def conflate[O2 >: Out](aggregate: (O2, O2) => O2): Repr[O2] = conflateWithSeed[O2](ConstantFun.scalaIdentityFunction)(aggregate)
+ def conflate[O2 >: Out](aggregate: (O2, O2) ⇒ O2): Repr[O2] = conflateWithSeed[O2](ConstantFun.scalaIdentityFunction)(aggregate)
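For example, summing elements while downstream is backpressured instead of buffering them individually:

    val summingWhileSlow = Flow[Int].conflate(_ + _)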
/**
* Allows a faster upstream to progress independently of a slower subscriber by aggregating elements into batches
@@ -1442,18 +1442,18 @@ trait FlowOps[+Out, +Mat] {
def idleTimeout(timeout: FiniteDuration): Repr[Out] = via(new Timers.Idle[Out](timeout))
/**
- * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
- * the stream is failed with a [[scala.concurrent.TimeoutException]]. The timeout is checked periodically,
- * so the resolution of the check is one period (equals to timeout value).
- *
- * '''Emits when''' upstream emits an element
- *
- * '''Backpressures when''' downstream backpressures
- *
- * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
- *
- * '''Cancels when''' downstream cancels
- */
+ * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
+ * the stream is failed with a [[scala.concurrent.TimeoutException]]. The timeout is checked periodically,
+ * so the resolution of the check is one period (equal to the timeout value).
+ *
+ * '''Emits when''' upstream emits an element
+ *
+ * '''Backpressures when''' downstream backpressures
+ *
+ * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
+ *
+ * '''Cancels when''' downstream cancels
+ */
def backpressureTimeout(timeout: FiniteDuration): Repr[Out] = via(new Timers.BackpressureTimeout[Out](timeout))
/**
@@ -1600,11 +1600,10 @@ trait FlowOps[+Out, +Mat] {
def zip[U](that: Graph[SourceShape[U], _]): Repr[(Out, U)] = via(zipGraph(that))
protected def zipGraph[U, M](that: Graph[SourceShape[U], M]): Graph[FlowShape[Out @uncheckedVariance, (Out, U)], M] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- val zip = b.add(Zip[Out, U]())
- r ~> zip.in1
- FlowShape(zip.in0, zip.out)
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ val zip = b.add(Zip[Out, U]())
+ r ~> zip.in1
+ FlowShape(zip.in0, zip.out)
}
/**
@@ -1623,11 +1622,10 @@ trait FlowOps[+Out, +Mat] {
via(zipWithGraph(that)(combine))
protected def zipWithGraph[Out2, Out3, M](that: Graph[SourceShape[Out2], M])(combine: (Out, Out2) ⇒ Out3): Graph[FlowShape[Out @uncheckedVariance, Out3], M] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- val zip = b.add(ZipWith[Out, Out2, Out3](combine))
- r ~> zip.in1
- FlowShape(zip.in0, zip.out)
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ val zip = b.add(ZipWith[Out, Out2, Out3](combine))
+ r ~> zip.in1
+ FlowShape(zip.in0, zip.out)
}
/**
@@ -1656,13 +1654,13 @@ trait FlowOps[+Out, +Mat] {
def interleave[U >: Out](that: Graph[SourceShape[U], _], segmentSize: Int): Repr[U] =
via(interleaveGraph(that, segmentSize))
- protected def interleaveGraph[U >: Out, M](that: Graph[SourceShape[U], M],
- segmentSize: Int): Graph[FlowShape[Out @uncheckedVariance, U], M] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- val interleave = b.add(Interleave[U](2, segmentSize))
- r ~> interleave.in(1)
- FlowShape(interleave.in(0), interleave.out)
+ protected def interleaveGraph[U >: Out, M](
+ that: Graph[SourceShape[U], M],
+ segmentSize: Int): Graph[FlowShape[Out @uncheckedVariance, U], M] =
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ val interleave = b.add(Interleave[U](2, segmentSize))
+ r ~> interleave.in(1)
+ FlowShape(interleave.in(0), interleave.out)
}
/**
@@ -1681,11 +1679,10 @@ trait FlowOps[+Out, +Mat] {
via(mergeGraph(that, eagerComplete))
protected def mergeGraph[U >: Out, M](that: Graph[SourceShape[U], M], eagerComplete: Boolean): Graph[FlowShape[Out @uncheckedVariance, U], M] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- val merge = b.add(Merge[U](2, eagerComplete))
- r ~> merge.in(1)
- FlowShape(merge.in(0), merge.out)
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ val merge = b.add(Merge[U](2, eagerComplete))
+ r ~> merge.in(1)
+ FlowShape(merge.in(0), merge.out)
}
/**
@@ -1707,11 +1704,10 @@ trait FlowOps[+Out, +Mat] {
via(mergeSortedGraph(that))
protected def mergeSortedGraph[U >: Out, M](that: Graph[SourceShape[U], M])(implicit ord: Ordering[U]): Graph[FlowShape[Out @uncheckedVariance, U], M] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- val merge = b.add(new MergeSorted[U])
- r ~> merge.in1
- FlowShape(merge.in0, merge.out)
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ val merge = b.add(new MergeSorted[U])
+ r ~> merge.in1
+ FlowShape(merge.in0, merge.out)
}
/**
@@ -1736,11 +1732,10 @@ trait FlowOps[+Out, +Mat] {
via(concatGraph(that))
protected def concatGraph[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- val merge = b.add(Concat[U]())
- r ~> merge.in(1)
- FlowShape(merge.in(0), merge.out)
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ val merge = b.add(Concat[U]())
+ r ~> merge.in(1)
+ FlowShape(merge.in(0), merge.out)
}
/**
@@ -1765,11 +1760,10 @@ trait FlowOps[+Out, +Mat] {
via(prependGraph(that))
protected def prependGraph[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- val merge = b.add(Concat[U]())
- r ~> merge.in(0)
- FlowShape(merge.in(1), merge.out)
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ val merge = b.add(Concat[U]())
+ r ~> merge.in(0)
+ FlowShape(merge.in(1), merge.out)
}
/**
@@ -1815,12 +1809,11 @@ trait FlowOps[+Out, +Mat] {
def alsoTo(that: Graph[SinkShape[Out], _]): Repr[Out] = via(alsoToGraph(that))
protected def alsoToGraph[M](that: Graph[SinkShape[Out], M]): Graph[FlowShape[Out @uncheckedVariance, Out], M] =
- GraphDSL.create(that) { implicit b ⇒
- r ⇒
- import GraphDSL.Implicits._
- val bcast = b.add(Broadcast[Out](2))
- bcast.out(1) ~> r
- FlowShape(bcast.in, bcast.out(0))
+ GraphDSL.create(that) { implicit b ⇒ r ⇒
+ import GraphDSL.Implicits._
+ val bcast = b.add(Broadcast[Out](2))
+ bcast.out(1) ~> r
+ FlowShape(bcast.in, bcast.out(0))
}
def withAttributes(attr: Attributes): Repr[Out]
@@ -1837,10 +1830,10 @@ trait FlowOps[+Out, +Mat] {
}
/**
- * INTERNAL API: this trait will be changed in binary-incompatible ways for classes that are derived from it!
- * Do not implement this interface outside the Akka code base!
- *
- * Binary compatibility is only maintained for callers of this trait’s interface.
+ * INTERNAL API: this trait will be changed in binary-incompatible ways for classes that are derived from it!
+ * Do not implement this interface outside the Akka code base!
+ *
+ * Binary compatibility is only maintained for callers of this trait’s interface.
*/
trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] {
@@ -2033,12 +2026,12 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] {
def mapMaterializedValue[Mat2](f: Mat ⇒ Mat2): ReprMat[Out, Mat2]
/**
- * Materializes to `FlowMonitor[Out]` that allows monitoring of the the current flow. All events are propagated
- * by the monitor unchanged. Note that the monitor inserts a memory barrier every time it processes an
- * event, and may therefor affect performance.
- * The `combine` function is used to combine the `FlowMonitor` with this flow's materialized value.
- */
- def monitor[Mat2]()(combine: (Mat, FlowMonitor[Out]) => Mat2): ReprMat[Out, Mat2] =
+ * Materializes to `FlowMonitor[Out]` that allows monitoring of the current flow. All events are propagated
+ * by the monitor unchanged. Note that the monitor inserts a memory barrier every time it processes an
+ * event, and may therefore affect performance.
+ * The `combine` function is used to combine the `FlowMonitor` with this flow's materialized value.
+ */
+ def monitor[Mat2]()(combine: (Mat, FlowMonitor[Out]) ⇒ Mat2): ReprMat[Out, Mat2] =
viaMat(GraphStages.monitor)(combine)
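For orientation, a minimal sketch of `monitor()` keeping the `FlowMonitor` as the materialized value (setup boilerplate assumed):

    import akka.actor.ActorSystem
    import akka.stream.{ ActorMaterializer, FlowMonitor }
    import akka.stream.scaladsl.{ Keep, Sink, Source }

    implicit val system = ActorSystem("demo")
    implicit val mat = ActorMaterializer()

    // Keep.right retains the FlowMonitor so stream state can be polled.
    val monitor: FlowMonitor[Int] =
      Source(1 to 1000000).monitor()(Keep.right).to(Sink.ignore).run()

    println(monitor.state) // e.g. Initialized or Received(n)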
/**
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala
index b6527ad59f..7ff38cae36 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala
@@ -48,10 +48,11 @@ object Framing {
* the length of the size field)
* @param byteOrder The ''ByteOrder'' to be used when decoding the field
*/
- def lengthField(fieldLength: Int,
- fieldOffset: Int = 0,
- maximumFrameLength: Int,
- byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = {
+ def lengthField(
+ fieldLength: Int,
+ fieldOffset: Int = 0,
+ maximumFrameLength: Int,
+ byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = {
require(fieldLength >= 1 && fieldLength <= 4, "Length field length must be 1, 2, 3 or 4.")
Flow[ByteString].via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder))
.named("lengthFieldFraming")
@@ -209,10 +210,10 @@ object Framing {
}
private final class LengthFieldFramingStage(
- val lengthFieldLength: Int,
- val lengthFieldOffset: Int,
+ val lengthFieldLength: Int,
+ val lengthFieldOffset: Int,
val maximumFrameLength: Int,
- val byteOrder: ByteOrder) extends GraphStage[FlowShape[ByteString, ByteString]] {
+ val byteOrder: ByteOrder) extends GraphStage[FlowShape[ByteString, ByteString]] {
private val minimumChunkSize = lengthFieldOffset + lengthFieldLength
private val intDecoder = byteOrder match {
case ByteOrder.BIG_ENDIAN ⇒ bigEndianDecoder
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala
index d30a726c1c..8c73568d52 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala
@@ -761,7 +761,7 @@ object ZipWithN {
/**
* Create a new `ZipWithN`.
*/
- def apply[A, O](zipper: immutable.Seq[A] => O)(n: Int) = new ZipWithN[A, O](zipper)(n)
+ def apply[A, O](zipper: immutable.Seq[A] ⇒ O)(n: Int) = new ZipWithN[A, O](zipper)(n)
}
/**
@@ -777,7 +777,7 @@ object ZipWithN {
*
* '''Cancels when''' downstream cancels
*/
-class ZipWithN[A, O](zipper: immutable.Seq[A] => O)(n: Int) extends GraphStage[UniformFanInShape[A, O]] {
+class ZipWithN[A, O](zipper: immutable.Seq[A] ⇒ O)(n: Int) extends GraphStage[UniformFanInShape[A, O]] {
override def initialAttributes = DefaultAttributes.zipWithN
override val shape = new UniformFanInShape[A, O](n)
def out = shape.out
@@ -801,7 +801,7 @@ class ZipWithN[A, O](zipper: immutable.Seq[A] => O)(n: Int) extends GraphStage[U
inSeq.foreach(pullInlet)
}
- inSeq.foreach(in => {
+ inSeq.foreach(in ⇒ {
setHandler(in, new InHandler {
override def onPush(): Unit = {
pending -= 1
@@ -1096,7 +1096,7 @@ object GraphDSL extends GraphApply {
}
private class PortOpsImpl[+Out](override val outlet: Outlet[Out @uncheckedVariance], b: Builder[_])
- extends PortOps[Out] {
+ extends PortOps[Out] {
override def withAttributes(attr: Attributes): Repr[Out] = throw settingAttrNotSupported
override def addAttributes(attr: Attributes): Repr[Out] = throw settingAttrNotSupported
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala
index b9a1408a24..3da00a38cb 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala
@@ -6,16 +6,16 @@ package akka.stream.scaladsl
import java.io.{ OutputStream, InputStream }
import java.util.Spliterators
import java.util.concurrent.atomic.AtomicReference
-import java.util.stream.{Collector, StreamSupport}
+import java.util.stream.{ Collector, StreamSupport }
-import akka.stream.{Attributes, SinkShape, IOResult}
+import akka.stream.{ Attributes, SinkShape, IOResult }
import akka.stream.impl._
import akka.stream.impl.Stages.DefaultAttributes
import akka.stream.impl.io.{ InputStreamSinkStage, OutputStreamSink, OutputStreamSourceStage, InputStreamSource }
import akka.util.ByteString
import scala.concurrent.duration.Duration._
-import scala.concurrent.{Await, Future}
+import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._
import akka.NotUsed
@@ -97,101 +97,100 @@ object StreamConverters {
Sink.fromGraph(new InputStreamSinkStage(readTimeout))
/**
- * Creates a sink which materializes into a ``Future`` which will be completed with result of the Java 8 ``Collector`` transformation
- * and reduction operations. This allows usage of Java 8 streams transformations for reactive streams. The ``Collector`` will trigger
- * demand downstream. Elements emitted through the stream will be accumulated into a mutable result container, optionally transformed
- * into a final representation after all input elements have been processed. The ``Collector`` can also do reduction
- * at the end. Reduction processing is performed sequentially
- *
- * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
- * to handle multiple invocations.
- */
+ * Creates a sink which materializes into a ``Future`` which will be completed with the result of the Java 8 ``Collector`` transformation
+ * and reduction operations. This allows usage of Java 8 stream transformations for reactive streams. The ``Collector`` will trigger
+ * demand downstream. Elements emitted through the stream will be accumulated into a mutable result container, optionally transformed
+ * into a final representation after all input elements have been processed. The ``Collector`` can also do reduction
+ * at the end. Reduction processing is performed sequentially.
+ *
+ * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
+ * to handle multiple invocations.
+ */
def javaCollector[T, R](collectorFactory: () ⇒ java.util.stream.Collector[T, _ <: Any, R]): Sink[T, Future[R]] =
Flow[T].fold(() ⇒
- new CollectorState[T,R](collectorFactory().asInstanceOf[Collector[T, Any, R]])) { (state, elem) ⇒ () ⇒ state().update(elem) }
+ new CollectorState[T, R](collectorFactory().asInstanceOf[Collector[T, Any, R]])) { (state, elem) ⇒ () ⇒ state().update(elem) }
.map(state ⇒ state().finish())
.toMat(Sink.head)(Keep.right).withAttributes(DefaultAttributes.javaCollector)
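A small sketch of `javaCollector` with a standard JDK collector (system/materializer setup assumed):

    import java.util.stream.Collectors
    import scala.concurrent.Future
    import akka.actor.ActorSystem
    import akka.stream.ActorMaterializer
    import akka.stream.scaladsl.{ Source, StreamConverters }

    implicit val system = ActorSystem("demo")
    implicit val mat = ActorMaterializer()
    import system.dispatcher

    // The factory is invoked per materialization, satisfying the note above.
    val result: Future[java.util.List[String]] =
      Source(List("a", "b", "c"))
        .runWith(StreamConverters.javaCollector(() ⇒ Collectors.toList[String]()))

    result.foreach(println) // [a, b, c]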
/**
- * Creates a sink which materializes into a ``Future`` which will be completed with result of the Java 8 ``Collector`` transformation
- * and reduction operations. This allows usage of Java 8 streams transformations for reactive streams. The ``Collector`` will trigger demand
- * downstream. Elements emitted through the stream will be accumulated into a mutable result container, optionally transformed
- * into a final representation after all input elements have been processed. The ``Collector`` can also do reduction
- * at the end. Reduction processing is performed in parallel based on graph ``Balance``.
- *
- * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
- * to handle multiple invocations.
- */
+ * Creates a sink which materializes into a ``Future`` which will be completed with the result of the Java 8 ``Collector`` transformation
+ * and reduction operations. This allows usage of Java 8 stream transformations for reactive streams. The ``Collector`` will trigger demand
+ * downstream. Elements emitted through the stream will be accumulated into a mutable result container, optionally transformed
+ * into a final representation after all input elements have been processed. The ``Collector`` can also do reduction
+ * at the end. Reduction processing is performed in parallel based on graph ``Balance``.
+ *
+ * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able
+ * to handle multiple invocations.
+ */
def javaCollectorParallelUnordered[T, R](parallelism: Int)(collectorFactory: () ⇒ java.util.stream.Collector[T, _ <: Any, R]): Sink[T, Future[R]] = {
if (parallelism == 1) javaCollector[T, R](collectorFactory)
else {
- Sink.fromGraph(GraphDSL.create(Sink.head[R]) { implicit b ⇒
- sink ⇒
- import GraphDSL.Implicits._
- val collector = collectorFactory().asInstanceOf[Collector[T, Any, R]]
- val balance = b.add(Balance[T](parallelism))
- val merge = b.add(Merge[() ⇒ CollectorState[T, R]](parallelism))
+ Sink.fromGraph(GraphDSL.create(Sink.head[R]) { implicit b ⇒ sink ⇒
+ import GraphDSL.Implicits._
+ val collector = collectorFactory().asInstanceOf[Collector[T, Any, R]]
+ val balance = b.add(Balance[T](parallelism))
+ val merge = b.add(Merge[() ⇒ CollectorState[T, R]](parallelism))
- for (i ← 0 until parallelism) {
- val worker = Flow[T]
- .fold(() => new CollectorState(collector)) { (state, elem) ⇒ () ⇒ state().update(elem) }
- .async
+ for (i ← 0 until parallelism) {
+ val worker = Flow[T]
+ .fold(() ⇒ new CollectorState(collector)) { (state, elem) ⇒ () ⇒ state().update(elem) }
+ .async
- balance.out(i) ~> worker ~> merge.in(i)
- }
+ balance.out(i) ~> worker ~> merge.in(i)
+ }
- merge.out
- .fold(() => new ReducerState(collector)) { (state, elem) ⇒ () ⇒ state().update(elem().accumulated) }
- .map(state => state().finish()) ~> sink.in
+ merge.out
+ .fold(() ⇒ new ReducerState(collector)) { (state, elem) ⇒ () ⇒ state().update(elem().accumulated) }
+ .map(state ⇒ state().finish()) ~> sink.in
- SinkShape(balance.in)
+ SinkShape(balance.in)
}).withAttributes(DefaultAttributes.javaCollectorParallelUnordered)
}
}
/**
- * Creates a sink which materializes into Java 8 ``Stream`` that can be run to trigger demand through the sink.
- * Elements emitted through the stream will be available for reading through the Java 8 ``Stream``.
- *
- * The Java 8 ``Stream`` will be ended when the stream flowing into this ``Sink`` completes, and closing the Java
- * ``Stream`` will cancel the inflow of this ``Sink``.
- *
- * Java 8 ``Stream`` throws exception in case reactive stream failed.
- *
- * Be aware that Java ``Stream`` blocks current thread while waiting on next element from downstream.
- * As it is interacting wit blocking API the implementation runs on a separate dispatcher
- * configured through the ``akka.stream.blocking-io-dispatcher``.
- */
+ * Creates a sink which materializes into a Java 8 ``Stream`` that can be run to trigger demand through the sink.
+ * Elements emitted through the stream will be available for reading through the Java 8 ``Stream``.
+ *
+ * The Java 8 ``Stream`` will be ended when the stream flowing into this ``Sink`` completes, and closing the Java
+ * ``Stream`` will cancel the inflow of this ``Sink``.
+ *
+ * The Java 8 ``Stream`` throws an exception in case the reactive stream failed.
+ *
+ * Be aware that the Java ``Stream`` blocks the current thread while waiting on the next element from downstream.
+ * As it is interacting with a blocking API, the implementation runs on a separate dispatcher
+ * configured through the ``akka.stream.blocking-io-dispatcher``.
+ */
def asJavaStream[T](): Sink[T, java.util.stream.Stream[T]] = {
Sink.fromGraph(new QueueSink[T]())
.mapMaterializedValue(queue ⇒ StreamSupport.stream(
- Spliterators.spliteratorUnknownSize(new java.util.Iterator[T] {
- var nextElementFuture: Future[Option[T]] = queue.pull()
- var nextElement: Option[T] = null
+ Spliterators.spliteratorUnknownSize(new java.util.Iterator[T] {
+ var nextElementFuture: Future[Option[T]] = queue.pull()
+ var nextElement: Option[T] = null
- override def hasNext: Boolean = {
- nextElement = Await.result(nextElementFuture, Inf)
- nextElement.isDefined
- }
+ override def hasNext: Boolean = {
+ nextElement = Await.result(nextElementFuture, Inf)
+ nextElement.isDefined
+ }
- override def next(): T = {
- val next = nextElement.get
- nextElementFuture = queue.pull()
- next
- }
- }, 0), false).onClose(new Runnable { def run = queue.cancel() }))
- .withAttributes(DefaultAttributes.asJavaStream)
+ override def next(): T = {
+ val next = nextElement.get
+ nextElementFuture = queue.pull()
+ next
+ }
+ }, 0), false).onClose(new Runnable { def run = queue.cancel() }))
+ .withAttributes(DefaultAttributes.asJavaStream)
}
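Usage sketch: materialize the sink into a `java.util.stream.Stream` and consume it on the calling thread, which blocks while waiting, as the scaladoc warns:

    import akka.actor.ActorSystem
    import akka.stream.ActorMaterializer
    import akka.stream.scaladsl.{ Source, StreamConverters }

    implicit val system = ActorSystem("demo")
    implicit val mat = ActorMaterializer()

    val javaStream: java.util.stream.Stream[Int] =
      Source(1 to 5).runWith(StreamConverters.asJavaStream[Int]())

    // A terminal operation pulls all five elements through the Akka stream.
    println(javaStream.count()) // 5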
/**
- * Creates a source that wraps a Java 8 ``Stream``. ``Source`` uses a stream iterator to get all its
- * elements and send them downstream on demand.
- *
- * Example usage: `Source.fromJavaStream(() ⇒ IntStream.rangeClosed(1, 10))`
- *
- * You can use [[Source.async]] to create asynchronous boundaries between synchronous Java ``Stream``
- * and the rest of flow.
- */
+ * Creates a source that wraps a Java 8 ``Stream``. The ``Source`` uses a stream iterator to get all its
+ * elements and sends them downstream on demand.
+ *
+ * Example usage: `StreamConverters.fromJavaStream(() ⇒ IntStream.rangeClosed(1, 10))`
+ *
+ * You can use [[Source.async]] to create asynchronous boundaries between the synchronous Java ``Stream``
+ * and the rest of the flow.
+ */
def fromJavaStream[T, S <: java.util.stream.BaseStream[T, S]](stream: () ⇒ java.util.stream.BaseStream[T, S]): Source[T, NotUsed] = {
import scala.collection.JavaConverters._
Source.fromIterator(() ⇒ stream().iterator().asScala).withAttributes(DefaultAttributes.fromJavaStream)
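Expanding the scaladoc example into a runnable sketch:

    import java.util.stream.IntStream
    import akka.actor.ActorSystem
    import akka.stream.ActorMaterializer
    import akka.stream.scaladsl.StreamConverters

    implicit val system = ActorSystem("demo")
    implicit val mat = ActorMaterializer()

    // Elements are pulled from the Java stream's iterator on demand;
    // the factory is called once per materialization.
    StreamConverters.fromJavaStream(() ⇒ IntStream.rangeClosed(1, 10))
      .runForeach(println)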
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala
index 496dfafb15..ca54ea50c1 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala
@@ -63,10 +63,11 @@ object TLS {
* The SSLEngine may use this information e.g. when an endpoint identification algorithm was
* configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]].
*/
- def apply(sslContext: SSLContext,
- sslConfig: Option[AkkaSSLConfig],
- firstSession: NegotiateNewSession, role: TLSRole,
- closing: TLSClosing = IgnoreComplete, hostInfo: Option[(String, Int)] = None): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def apply(
+ sslContext: SSLContext,
+ sslConfig: Option[AkkaSSLConfig],
+ firstSession: NegotiateNewSession, role: TLSRole,
+ closing: TLSClosing = IgnoreComplete, hostInfo: Option[(String, Int)] = None): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new scaladsl.BidiFlow(TlsModule(Attributes.none, sslContext, sslConfig, firstSession, role, closing, hostInfo))
/**
@@ -85,9 +86,10 @@ object TLS {
* The SSLEngine may use this information e.g. when an endpoint identification algorithm was
* configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]].
*/
- def apply(sslContext: SSLContext,
- firstSession: NegotiateNewSession, role: TLSRole,
- closing: TLSClosing, hostInfo: Option[(String, Int)]): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def apply(
+ sslContext: SSLContext,
+ firstSession: NegotiateNewSession, role: TLSRole,
+ closing: TLSClosing, hostInfo: Option[(String, Int)]): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new scaladsl.BidiFlow(TlsModule(Attributes.none, sslContext, None, firstSession, role, closing, hostInfo))
/**
@@ -106,8 +108,9 @@ object TLS {
* The SSLEngine may use this information e.g. when an endpoint identification algorithm was
* configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]].
*/
- def apply(sslContext: SSLContext,
- firstSession: NegotiateNewSession, role: TLSRole): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
+ def apply(
+ sslContext: SSLContext,
+ firstSession: NegotiateNewSession, role: TLSRole): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] =
new scaladsl.BidiFlow(TlsModule(Attributes.none, sslContext, None, firstSession, role, IgnoreComplete, None))
}
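A client-side sketch of the simplest overload above, assuming the `Client` role object and `TLSProtocol.NegotiateNewSession.withDefaults` from `akka.stream`; `SSLContext.getDefault` is a placeholder, a real deployment would configure its own context:

    import javax.net.ssl.SSLContext
    import akka.stream.{ Client, TLSProtocol }
    import akka.stream.scaladsl.TLS

    // BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed],
    // typically joined with a transport, e.g. tls.join(Tcp().outgoingConnection(...)).
    val tls = TLS(
      SSLContext.getDefault,
      TLSProtocol.NegotiateNewSession.withDefaults,
      Client)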
diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala
index dc03455ed9..bdcbf0ad99 100644
--- a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala
+++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala
@@ -31,9 +31,9 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider {
* Represents an accepted incoming TCP connection.
*/
final case class IncomingConnection(
- localAddress: InetSocketAddress,
+ localAddress: InetSocketAddress,
remoteAddress: InetSocketAddress,
- flow: Flow[ByteString, ByteString, NotUsed]) {
+ flow: Flow[ByteString, ByteString, NotUsed]) {
/**
* Handles the connection using the given flow, which is materialized exactly once and the respective
@@ -87,12 +87,13 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* independently whether the client is still attempting to write. This setting is recommended
* for servers, and therefore it is the default setting.
*/
- def bind(interface: String,
- port: Int,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- halfClose: Boolean = false,
- idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] =
+ def bind(
+ interface: String,
+ port: Int,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ halfClose: Boolean = false,
+ idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] =
Source.fromGraph(new ConnectionSourceStage(
IO(IoTcp)(system),
new InetSocketAddress(interface, port),
@@ -126,13 +127,13 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* for servers, and therefore it is the default setting.
*/
def bindAndHandle(
- handler: Flow[ByteString, ByteString, _],
- interface: String,
- port: Int,
- backlog: Int = 100,
- options: immutable.Traversable[SocketOption] = Nil,
- halfClose: Boolean = false,
- idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = {
+ handler: Flow[ByteString, ByteString, _],
+ interface: String,
+ port: Int,
+ backlog: Int = 100,
+ options: immutable.Traversable[SocketOption] = Nil,
+ halfClose: Boolean = false,
+ idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = {
bind(interface, port, backlog, options, halfClose, idleTimeout).to(Sink.foreach { conn: IncomingConnection ⇒
conn.flow.join(handler).run()
}).run()
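A sketch of `bindAndHandle` as an echo server (address and port are illustrative):

    import akka.actor.ActorSystem
    import akka.stream.ActorMaterializer
    import akka.stream.scaladsl.{ Flow, Tcp }
    import akka.util.ByteString

    implicit val system = ActorSystem("demo")
    implicit val mat = ActorMaterializer()

    // Every accepted connection is handled by the identity flow,
    // echoing received bytes back to the client.
    Tcp().bindAndHandle(Flow[ByteString], interface = "127.0.0.1", port = 8888)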
@@ -154,12 +155,13 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
* If set to false, the connection will immediately closed once the client closes its write side,
* independently whether the server is still attempting to write.
*/
- def outgoingConnection(remoteAddress: InetSocketAddress,
- localAddress: Option[InetSocketAddress] = None,
- options: immutable.Traversable[SocketOption] = Nil,
- halfClose: Boolean = true,
- connectTimeout: Duration = Duration.Inf,
- idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = {
+ def outgoingConnection(
+ remoteAddress: InetSocketAddress,
+ localAddress: Option[InetSocketAddress] = None,
+ options: immutable.Traversable[SocketOption] = Nil,
+ halfClose: Boolean = true,
+ connectTimeout: Duration = Duration.Inf,
+ idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = {
val tcpFlow = Flow.fromGraph(new OutgoingConnectionStage(
IO(IoTcp)(system),
diff --git a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala
index 3b0ce2f82d..c6bf767138 100644
--- a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala
+++ b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala
@@ -123,9 +123,10 @@ object GraphStageLogic {
/**
* Minimal actor to work with other actors and watch them in a synchronous ways
*/
- final class StageActor(materializer: ActorMaterializer,
- getAsyncCallback: StageActorRef.Receive ⇒ AsyncCallback[(ActorRef, Any)],
- initialReceive: StageActorRef.Receive) {
+ final class StageActor(
+ materializer: ActorMaterializer,
+ getAsyncCallback: StageActorRef.Receive ⇒ AsyncCallback[(ActorRef, Any)],
+ initialReceive: StageActorRef.Receive) {
private val callback = getAsyncCallback(internalReceive)
private def cell = materializer.supervisor match {
@@ -1149,9 +1150,9 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap
* adding the new timer.
*/
final protected def schedulePeriodicallyWithInitialDelay(
- timerKey: Any,
+ timerKey: Any,
initialDelay: FiniteDuration,
- interval: FiniteDuration): Unit = {
+ interval: FiniteDuration): Unit = {
cancelTimer(timerKey)
val id = timerIdGen.next()
val task = interpreter.materializer.schedulePeriodically(initialDelay, interval, new Runnable {
diff --git a/akka-stream/src/main/scala/akka/stream/stage/Stage.scala b/akka-stream/src/main/scala/akka/stream/stage/Stage.scala
index d65ae4fbd9..1ddb08d392 100644
--- a/akka-stream/src/main/scala/akka/stream/stage/Stage.scala
+++ b/akka-stream/src/main/scala/akka/stream/stage/Stage.scala
@@ -38,8 +38,8 @@ private[stream] object AbstractStage {
private class PushPullGraphLogic[In, Out](
private val shape: FlowShape[In, Out],
- val attributes: Attributes,
- val stage: AbstractStage[In, Out, Directive, Directive, Context[Out], LifecycleContext])
+ val attributes: Attributes,
+ val stage: AbstractStage[In, Out, Directive, Directive, Context[Out], LifecycleContext])
extends GraphStageLogic(shape) with DetachedContext[Out] {
final override def materializer: Materializer = interpreter.materializer
@@ -163,7 +163,7 @@ private[stream] object AbstractStage {
}
class PushPullGraphStageWithMaterializedValue[-In, +Out, Ext, +Mat](
- val factory: (Attributes) ⇒ (Stage[In, Out], Mat),
+ val factory: (Attributes) ⇒ (Stage[In, Out], Mat),
stageAttributes: Attributes)
extends GraphStageWithMaterializedValue[FlowShape[In, Out], Mat] {
diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
index a27a795d55..3167b4f0fb 100644
--- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala
@@ -52,16 +52,16 @@ private[testkit] class CallingThreadDispatcherQueues extends Extension {
queues = (Map.newBuilder[CallingThreadMailbox, Set[WeakReference[MessageQueue]]] /: queues) {
case (m, (k, v)) ⇒
val nv = v filter (_.get ne null)
- if (nv.isEmpty) m else m += (k -> nv)
+ if (nv.isEmpty) m else m += (k → nv)
}.result
}
protected[akka] def registerQueue(mbox: CallingThreadMailbox, q: MessageQueue): Unit = synchronized {
if (queues contains mbox) {
val newSet = queues(mbox) + new WeakReference(q)
- queues += mbox -> newSet
+ queues += mbox → newSet
} else {
- queues += mbox -> Set(new WeakReference(q))
+ queues += mbox → Set(new WeakReference(q))
}
val now = System.nanoTime
if (now - lastGC > 1000000000l) {
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
index aa63c9dcf4..0de892a837 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
@@ -19,10 +19,10 @@ import akka.pattern.ask
* @since 1.1
*/
class TestActorRef[T <: Actor](
- _system: ActorSystem,
- _props: Props,
+ _system: ActorSystem,
+ _props: Props,
_supervisor: ActorRef,
- name: String)
+ name: String)
extends {
val props =
_props.withDispatcher(
@@ -149,7 +149,8 @@ object TestActorRef {
def apply[T <: Actor](implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = apply[T](randomName)
private def dynamicCreateRecover[U]: PartialFunction[Throwable, U] = {
- case exception ⇒ throw ActorInitializationException(null,
+ case exception ⇒ throw ActorInitializationException(
+ null,
"Could not instantiate Actor" +
"\nMake sure Actor is NOT defined inside a class/trait," +
"\nif so put it outside the class/trait, f.e. in a companion object," +
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala
index 1ad68acc04..b93d374989 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala
@@ -95,7 +95,8 @@ abstract class EventFilter(occurrences: Int) {
* `occurrences` parameter specifies.
*/
def assertDone(max: Duration): Unit =
- assert(awaitDone(max),
+ assert(
+ awaitDone(max),
if (todo > 0) s"$todo messages outstanding on $this"
else s"received ${-todo} excess messages on $this")
@@ -199,7 +200,8 @@ object EventFilter {
* source filter).''
*/
def warning(message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter =
- WarningFilter(Option(source),
+ WarningFilter(
+ Option(source),
if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start,
message ne null)(occurrences)
@@ -218,7 +220,8 @@ object EventFilter {
* source filter).''
*/
def info(message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter =
- InfoFilter(Option(source),
+ InfoFilter(
+ Option(source),
if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start,
message ne null)(occurrences)
@@ -237,7 +240,8 @@ object EventFilter {
* source filter).''
*/
def debug(message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter =
- DebugFilter(Option(source),
+ DebugFilter(
+ Option(source),
if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start,
message ne null)(occurrences)
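For context, these factories are used with `intercept`; a sketch (the warning text is made up, and `TestEventListener` must be installed as a logger for filters to work):

    import akka.actor.ActorSystem
    import akka.testkit.EventFilter
    import com.typesafe.config.ConfigFactory

    implicit val system = ActorSystem("demo", ConfigFactory.parseString(
      """akka.loggers = ["akka.testkit.TestEventListener"]"""))

    // Expect exactly one matching warning while the block runs;
    // the assertion fails if it never arrives.
    EventFilter.warning(start = "disk almost full", occurrences = 1) intercept {
      system.log.warning("disk almost full: 97% used")
    }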
@@ -271,9 +275,9 @@ object EventFilter {
* If you want to match all Error events, the most efficient is to use Left("").
*/
final case class ErrorFilter(
- throwable: Class[_],
- override val source: Option[String],
- override val message: Either[String, Regex],
+ throwable: Class[_],
+ override val source: Option[String],
+ override val message: Either[String, Regex],
override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -323,8 +327,8 @@ final case class ErrorFilter(
* If you want to match all Warning events, the most efficient is to use Left("").
*/
final case class WarningFilter(
- override val source: Option[String],
- override val message: Either[String, Regex],
+ override val source: Option[String],
+ override val message: Either[String, Regex],
override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -350,7 +354,8 @@ final case class WarningFilter(
* whether the event’s message must match the given message string or pattern completely
*/
def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) =
- this(Option(source),
+ this(
+ Option(source),
if (message eq null) Left("")
else if (pattern) Right(new Regex(message))
else Left(message),
@@ -366,8 +371,8 @@ final case class WarningFilter(
* If you want to match all Info events, the most efficient is to use Left("").
*/
final case class InfoFilter(
- override val source: Option[String],
- override val message: Either[String, Regex],
+ override val source: Option[String],
+ override val message: Either[String, Regex],
override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -393,7 +398,8 @@ final case class InfoFilter(
* whether the event’s message must match the given message string or pattern completely
*/
def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) =
- this(Option(source),
+ this(
+ Option(source),
if (message eq null) Left("")
else if (pattern) Right(new Regex(message))
else Left(message),
@@ -409,8 +415,8 @@ final case class InfoFilter(
* If you want to match all Debug events, the most efficient is to use Left("").
*/
final case class DebugFilter(
- override val source: Option[String],
- override val message: Either[String, Regex],
+ override val source: Option[String],
+ override val message: Either[String, Regex],
override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) {
def matches(event: LogEvent) = {
@@ -436,7 +442,8 @@ final case class DebugFilter(
* whether the event’s message must match the given message string or pattern completely
*/
def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) =
- this(Option(source),
+ this(
+ Option(source),
if (message eq null) Left("")
else if (pattern) Right(new Regex(message))
else Left(message),
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala
index 36be493c82..72bee30571 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala
@@ -33,10 +33,10 @@ import scala.reflect.ClassTag
* @since 1.2
*/
class TestFSMRef[S, D, T <: Actor](
- system: ActorSystem,
- props: Props,
+ system: ActorSystem,
+ props: Props,
supervisor: ActorRef,
- name: String)(implicit ev: T <:< FSM[S, D])
+ name: String)(implicit ev: T <:< FSM[S, D])
extends TestActorRef[T](system, props, supervisor, name) {
private def fsm: T = underlyingActor
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala
index c92392427e..2016816ae6 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala
@@ -123,8 +123,9 @@ trait TestKitBase {
*/
val testActor: ActorRef = {
val impl = system.asInstanceOf[ExtendedActorSystem]
- val ref = impl.systemActorOf(TestActor.props(queue)
- .withDispatcher(CallingThreadDispatcher.Id),
+ val ref = impl.systemActorOf(
+ TestActor.props(queue)
+ .withDispatcher(CallingThreadDispatcher.Id),
"%s-%d".format(testActorName, TestKit.testActorId.incrementAndGet))
awaitCond(ref match {
case r: RepointableRef ⇒ r.isStarted
@@ -500,7 +501,8 @@ trait TestKitBase {
private def checkMissingAndUnexpected(missing: Seq[Any], unexpected: Seq[Any],
missingMessage: String, unexpectedMessage: String): Unit = {
- assert(missing.isEmpty && unexpected.isEmpty,
+ assert(
+ missing.isEmpty && unexpected.isEmpty,
(if (missing.isEmpty) "" else missing.mkString(missingMessage + " [", ", ", "] ")) +
(if (unexpected.isEmpty) "" else unexpected.mkString(unexpectedMessage + " [", ", ", "]")))
}
@@ -679,9 +681,10 @@ trait TestKitBase {
*
* If verifySystemShutdown is true, then an exception will be thrown on failure.
*/
- def shutdown(actorSystem: ActorSystem = system,
- duration: Duration = 5.seconds.dilated.min(10.seconds),
- verifySystemShutdown: Boolean = false) {
+ def shutdown(
+ actorSystem: ActorSystem = system,
+ duration: Duration = 5.seconds.dilated.min(10.seconds),
+ verifySystemShutdown: Boolean = false) {
TestKit.shutdownActorSystem(actorSystem, duration, verifySystemShutdown)
}
@@ -771,9 +774,10 @@ object TestKit {
*
* If verifySystemShutdown is true, then an exception will be thrown on failure.
*/
- def shutdownActorSystem(actorSystem: ActorSystem,
- duration: Duration = 10.seconds,
- verifySystemShutdown: Boolean = false): Unit = {
+ def shutdownActorSystem(
+ actorSystem: ActorSystem,
+ duration: Duration = 10.seconds,
+ verifySystemShutdown: Boolean = false): Unit = {
actorSystem.terminate()
try Await.ready(actorSystem.whenTerminated, duration) catch {
case _: TimeoutException ⇒
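Usage of the helper is straightforward; a sketch with the verification flag enabled:

    import scala.concurrent.duration._
    import akka.actor.ActorSystem
    import akka.testkit.TestKit

    val sys = ActorSystem("to-be-stopped")
    // Terminates the system and awaits completion, throwing if shutdown
    // does not finish within the given duration.
    TestKit.shutdownActorSystem(sys, 10.seconds, verifySystemShutdown = true)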
diff --git a/akka-testkit/src/main/scala/akka/testkit/package.scala b/akka-testkit/src/main/scala/akka/testkit/package.scala
index 46bd5ea61f..c073e59357 100644
--- a/akka-testkit/src/main/scala/akka/testkit/package.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/package.scala
@@ -3,7 +3,6 @@
*/
package akka
-
import akka.actor.ActorSystem
import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.reflect.ClassTag
diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala
index c7cda3df81..7a1675b405 100644
--- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala
@@ -55,12 +55,13 @@ object AkkaSpec {
}
abstract class AkkaSpec(_system: ActorSystem)
- extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll with WatchedByCoroner
- with ConversionCheckedTripleEquals with ScalaFutures {
+ extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll with WatchedByCoroner
+ with ConversionCheckedTripleEquals with ScalaFutures {
implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration)
- def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName(getClass),
+ def this(config: Config) = this(ActorSystem(
+ AkkaSpec.getCallerName(getClass),
ConfigFactory.load(config.withFallback(AkkaSpec.testConf))))
def this(s: String) = this(ConfigFactory.parseString(s))
diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala
index e24a4d646e..e9a2b051ca 100644
--- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala
@@ -34,8 +34,8 @@ class AkkaSpecSpec extends WordSpec with Matchers {
// verbose config just for demonstration purposes, please leave in in case of debugging
import scala.collection.JavaConverters._
val conf = Map(
- "akka.actor.debug.lifecycle" -> true, "akka.actor.debug.event-stream" -> true,
- "akka.loglevel" -> "DEBUG", "akka.stdout-loglevel" -> "DEBUG")
+ "akka.actor.debug.lifecycle" → true, "akka.actor.debug.event-stream" → true,
+ "akka.loglevel" → "DEBUG", "akka.stdout-loglevel" → "DEBUG")
val system = ActorSystem("AkkaSpec1", ConfigFactory.parseMap(conf.asJava).withFallback(AkkaSpec.testConf))
var refs = Seq.empty[ActorRef]
val spec = new AkkaSpec(system) { refs = Seq(testActor, system.actorOf(Props.empty, "name")) }
diff --git a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala
index 9edabd8d16..1ce81c9d39 100644
--- a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala
@@ -78,7 +78,7 @@ object Coroner {
*/
def watch(duration: FiniteDuration, reportTitle: String, out: PrintStream,
startAndStopDuration: FiniteDuration = defaultStartAndStopDuration,
- displayThreadCounts: Boolean = false): WatchHandle = {
+ displayThreadCounts: Boolean = false): WatchHandle = {
val watchedHandle = new WatchHandleImpl(startAndStopDuration)
diff --git a/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala
index e8a62440fd..d280bfb113 100644
--- a/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala
@@ -3,7 +3,7 @@ package akka.testkit
import scala.concurrent.duration._
import org.scalatest.exceptions.TestFailedException
-class TestTimeSpec extends AkkaSpec(Map("akka.test.timefactor" -> 2.0)) {
+class TestTimeSpec extends AkkaSpec(Map("akka.test.timefactor" → 2.0)) {
"A TestKit" must {
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala
index 7ecf45d4d2..3d28cda3bd 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala
@@ -18,15 +18,15 @@ private[akka] class FileDescriptorMetricSet(os: OperatingSystemMXBean = Manageme
override def getMetrics: util.Map[String, Metric] = {
Map[String, Metric](
- name("file-descriptors", "open") -> new Gauge[Long] {
+ name("file-descriptors", "open") → new Gauge[Long] {
override def getValue: Long = invoke("getOpenFileDescriptorCount")
},
- name("file-descriptors", "max") -> new Gauge[Long] {
+ name("file-descriptors", "max") → new Gauge[Long] {
override def getValue: Long = invoke("getMaxFileDescriptorCount")
},
- name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava
+ name("file-descriptors", "ratio") → new FileDescriptorRatioGauge(os)).asJava
}
private def invoke(name: String): Long = {
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
index 943f020050..9ab2ced147 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala
@@ -16,9 +16,9 @@ import org.{ HdrHistogram ⇒ hdr }
* integer between 0 and 5.
*/
private[akka] class HdrHistogram(
- highestTrackableValue: Long,
+ highestTrackableValue: Long,
numberOfSignificantValueDigits: Int,
- val unit: String = "")
+ val unit: String = "")
extends Metric {
private val hist = new hdr.Histogram(highestTrackableValue, numberOfSignificantValueDigits)
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala
index 00ecb7ade8..2990f2cc98 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala
@@ -191,7 +191,7 @@ trait AkkaMetricRegistry {
for {
(key, metric) ← getMetrics.asScala
if clazz.isInstance(metric)
- } yield key -> metric.asInstanceOf[T]
+ } yield key → metric.asInstanceOf[T]
}
private[akka] class MetricsKitSettings(config: Config) {
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
index 016e864cea..6f46358d72 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala
@@ -91,6 +91,6 @@ private[metrics] trait MetricsPrefix extends MetricSet {
abstract override def getMetrics: util.Map[String, Metric] = {
// does not have to be fast, is only called once during registering registry
import collection.JavaConverters._
- (super.getMetrics.asScala.map { case (k, v) ⇒ (prefix / k).toString -> v }).asJava
+ (super.getMetrics.asScala.map { case (k, v) ⇒ (prefix / k).toString → v }).asJava
}
}
diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
index da56d171e4..69d0e77a10 100644
--- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala
@@ -15,8 +15,8 @@ import scala.reflect.ClassTag
*/
class AkkaConsoleReporter(
registry: AkkaMetricRegistry,
- verbose: Boolean,
- output: PrintStream = System.out)
+ verbose: Boolean,
+ output: PrintStream = System.out)
extends ScheduledReporter(registry.asInstanceOf[MetricRegistry], "akka-console-reporter", MetricFilter.ALL, TimeUnit.SECONDS, TimeUnit.NANOSECONDS) {
private final val ConsoleWidth = 80
diff --git a/akka-typed/src/main/scala/akka/typed/ActorContext.scala b/akka-typed/src/main/scala/akka/typed/ActorContext.scala
index 459240729b..f8593a9bee 100644
--- a/akka-typed/src/main/scala/akka/typed/ActorContext.scala
+++ b/akka-typed/src/main/scala/akka/typed/ActorContext.scala
@@ -155,9 +155,9 @@ trait ActorContext[T] {
* See [[EffectfulActorContext]] for more advanced uses.
*/
class StubbedActorContext[T](
- val name: String,
+ val name: String,
override val props: Props[T])(
- override implicit val system: ActorSystem[Nothing]) extends ActorContext[T] {
+ override implicit val system: ActorSystem[Nothing]) extends ActorContext[T] {
val inbox = Inbox.sync[T](name)
override val self = inbox.ref
@@ -169,7 +169,7 @@ class StubbedActorContext[T](
override def child(name: String): Option[ActorRef[Nothing]] = _children get name map (_.ref)
override def spawnAnonymous[U](props: Props[U]): ActorRef[U] = {
val i = Inbox.sync[U](childName.next())
- _children += i.ref.untypedRef.path.name -> i
+ _children += i.ref.untypedRef.path.name → i
i.ref
}
override def spawn[U](props: Props[U], name: String): ActorRef[U] =
@@ -177,12 +177,12 @@ class StubbedActorContext[T](
case Some(_) ⇒ throw new untyped.InvalidActorNameException(s"actor name $name is already taken")
case None ⇒
val i = Inbox.sync[U](name)
- _children += name -> i
+ _children += name → i
i.ref
}
override def actorOf(props: untyped.Props): untyped.ActorRef = {
val i = Inbox.sync[Any](childName.next())
- _children += i.ref.untypedRef.path.name -> i
+ _children += i.ref.untypedRef.path.name → i
i.ref.untypedRef
}
override def actorOf(props: untyped.Props, name: String): untyped.ActorRef =
@@ -190,7 +190,7 @@ class StubbedActorContext[T](
case Some(_) ⇒ throw new untyped.InvalidActorNameException(s"actor name $name is already taken")
case None ⇒
val i = Inbox.sync[Any](name)
- _children += name -> i
+ _children += name → i
i.ref.untypedRef
}
override def stop(child: ActorRef[Nothing]): Boolean = {
diff --git a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala
index 0357243743..2d338f2731 100644
--- a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala
+++ b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala
@@ -127,8 +127,8 @@ object ActorSystem {
private class Wrapper(val untyped: ExtendedActorSystem) extends ActorSystem[Nothing](untyped.name) with ScalaActorRef[Nothing]
def apply[T](name: String, guardianProps: Props[T],
- config: Option[Config] = None,
- classLoader: Option[ClassLoader] = None,
+ config: Option[Config] = None,
+ classLoader: Option[ClassLoader] = None,
executionContext: Option[ExecutionContext] = None): ActorSystem[T] = {
val cl = classLoader.getOrElse(akka.actor.ActorSystem.findClassLoader())
val appConfig = config.getOrElse(ConfigFactory.load(cl))
diff --git a/akka-typed/src/main/scala/akka/typed/Ask.scala b/akka-typed/src/main/scala/akka/typed/Ask.scala
index f63bc50966..1cd33d67a5 100644
--- a/akka-typed/src/main/scala/akka/typed/Ask.scala
+++ b/akka-typed/src/main/scala/akka/typed/Ask.scala
@@ -37,11 +37,13 @@ object AskPattern {
private class PromiseRef[U](actorRef: ActorRef[_], timeout: Timeout) {
val (ref: ActorRef[U], future: Future[U], promiseRef: PromiseActorRef) = actorRef.untypedRef match {
case ref: InternalActorRef if ref.isTerminated ⇒
- (ActorRef[U](ref.provider.deadLetters),
+ (
+ ActorRef[U](ref.provider.deadLetters),
Future.failed[U](new AskTimeoutException(s"Recipient[$actorRef] had already been terminated.")))
case ref: InternalActorRef ⇒
if (timeout.duration.length <= 0)
- (ActorRef[U](ref.provider.deadLetters),
+ (
+ ActorRef[U](ref.provider.deadLetters),
Future.failed[U](new IllegalArgumentException(s"Timeout length must be positive, question not sent to [$actorRef]")))
else {
val a = PromiseActorRef(ref.provider, timeout, actorRef, "unknown")
diff --git a/akka-typed/src/main/scala/akka/typed/Behavior.scala b/akka-typed/src/main/scala/akka/typed/Behavior.scala
index b68f3eee7a..ebeb630a0a 100644
--- a/akka-typed/src/main/scala/akka/typed/Behavior.scala
+++ b/akka-typed/src/main/scala/akka/typed/Behavior.scala
@@ -3,7 +3,6 @@
*/
package akka.typed
-
/**
* The behavior of an actor defines how it reacts to the messages that it
* receives. The message may either be of the type that the Actor declares
diff --git a/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala b/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala
index 7144ff798a..0fad48a56a 100644
--- a/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala
+++ b/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala
@@ -193,10 +193,11 @@ class ActorContextSpec extends TypedSpec(ConfigFactory.parseString(
* The latter is very useful in order to avoid disturbances with GotSignal(PostStop) in
* test procedures that stop this child.
*/
- def mkChild(name: Option[String],
- monitor: ActorRef[Event],
- self: ActorRef[Event],
- inert: Boolean = false): StepWise.Steps[Event, (ActorRef[Command], ActorRef[Command])] = {
+ def mkChild(
+ name: Option[String],
+ monitor: ActorRef[Event],
+ self: ActorRef[Event],
+ inert: Boolean = false): StepWise.Steps[Event, (ActorRef[Command], ActorRef[Command])] = {
val s =
startWith.keep { subj ⇒
subj ! MkChild(name, monitor, self)
diff --git a/akka-typed/src/test/scala/akka/typed/StepWise.scala b/akka-typed/src/test/scala/akka/typed/StepWise.scala
index 91074a38f6..7c614072f0 100644
--- a/akka-typed/src/test/scala/akka/typed/StepWise.scala
+++ b/akka-typed/src/test/scala/akka/typed/StepWise.scala
@@ -91,7 +91,7 @@ object StepWise {
copy(ops = MultiMessage(timeout, count, (msgs, value) ⇒ { f.asInstanceOf[(Seq[Any], Any) ⇒ Any](msgs, value); value }, getTrace()) :: ops)
def expectFailureKeep(timeout: FiniteDuration)(f: (Failed, U) ⇒ Failed.Decision): Steps[T, U] =
- copy(ops = Failure(timeout, (failed, value) ⇒ f.asInstanceOf[(Failed, Any) ⇒ Failed.Decision](failed, value) -> value, getTrace()) :: ops)
+ copy(ops = Failure(timeout, (failed, value) ⇒ f.asInstanceOf[(Failed, Any) ⇒ Failed.Decision](failed, value) → value, getTrace()) :: ops)
def expectTerminationKeep(timeout: FiniteDuration)(f: (Terminated, U) ⇒ Unit): Steps[T, U] =
copy(ops = Termination(timeout, (t, value) ⇒ { f.asInstanceOf[(Terminated, Any) ⇒ Any](t, value); value }, getTrace()) :: ops)
diff --git a/akka-typed/src/test/scala/akka/typed/TypedSpec.scala b/akka-typed/src/test/scala/akka/typed/TypedSpec.scala
index c7aa92fd44..5e38e24f54 100644
--- a/akka-typed/src/test/scala/akka/typed/TypedSpec.scala
+++ b/akka-typed/src/test/scala/akka/typed/TypedSpec.scala
@@ -68,11 +68,11 @@ abstract class TypedSpec(config: Config) extends Spec with Matchers with BeforeA
}
def muteExpectedException[T <: Exception: ClassTag](
- message: String = null,
- source: String = null,
- start: String = "",
- pattern: String = null,
- occurrences: Int = Int.MaxValue): EventFilter = {
+ message: String = null,
+ source: String = null,
+ start: String = "",
+ pattern: String = null,
+ occurrences: Int = Int.MaxValue): EventFilter = {
val filter = EventFilter(message, source, start, pattern, occurrences)
system.eventStream.publish(Mute(filter))
filter
diff --git a/project/Formatting.scala b/project/Formatting.scala
index f74a89c22f..5ef02c6957 100644
--- a/project/Formatting.scala
+++ b/project/Formatting.scala
@@ -9,31 +9,35 @@ import com.typesafe.sbt.SbtScalariform
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
object Formatting {
- lazy val formatSettings = SbtScalariform.scalariformSettings ++ Seq(
- ScalariformKeys.preferences in Compile := formattingPreferences,
- ScalariformKeys.preferences in Test := formattingPreferences,
- ScalariformKeys.preferences in MultiJvm := formattingPreferences
+ lazy val formatSettings = Seq(
+ ScalariformKeys.preferences in Compile <<= formattingPreferences,
+ ScalariformKeys.preferences in Test <<= formattingPreferences,
+ ScalariformKeys.preferences in MultiJvm <<= formattingPreferences
)
- lazy val docFormatSettings = SbtScalariform.scalariformSettings ++ Seq(
- ScalariformKeys.preferences in Compile := docFormattingPreferences,
- ScalariformKeys.preferences in Test := docFormattingPreferences,
- ScalariformKeys.preferences in MultiJvm := docFormattingPreferences
+ lazy val docFormatSettings = Seq(
+ ScalariformKeys.preferences in Compile <<= docFormattingPreferences,
+ ScalariformKeys.preferences in Test <<= docFormattingPreferences,
+ ScalariformKeys.preferences in MultiJvm <<= docFormattingPreferences
)
- def formattingPreferences = {
+ def formattingPreferences = Def.setting {
import scalariform.formatter.preferences._
- FormattingPreferences()
+ ScalariformKeys.preferences.value
.setPreference(RewriteArrowSymbols, true)
.setPreference(AlignParameters, true)
.setPreference(AlignSingleLineCaseStatements, true)
+ .setPreference(DanglingCloseParenthesis, Preserve)
+ .setPreference(DoubleIndentClassDeclaration, false)
}
- def docFormattingPreferences = {
+ def docFormattingPreferences = Def.setting {
import scalariform.formatter.preferences._
- FormattingPreferences()
+ ScalariformKeys.preferences.value
.setPreference(RewriteArrowSymbols, false)
.setPreference(AlignParameters, true)
.setPreference(AlignSingleLineCaseStatements, true)
+ .setPreference(DanglingCloseParenthesis, Preserve)
+ .setPreference(DoubleIndentClassDeclaration, false)
}
}
diff --git a/project/plugins.sbt b/project/plugins.sbt
index 9b5025d278..e863d3bbdb 100644
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -9,7 +9,7 @@ resolvers += "Bintray Jcenter" at "https://jcenter.bintray.com/"
addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.3.8")
//#sbt-multi-jvm
-addSbtPlugin("com.typesafe.sbt" % "sbt-scalariform" % "1.2.0")
+addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.6.0")
addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "0.7.1")