diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala
index bc8162c9bf..cf285449f3 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala
@@ -130,7 +130,7 @@ import akka.annotation.InternalApi
   // this is backward compatible with the old behaviour, hence it uses the loader used to load the test-kit
   // which is not necessarily the one used to load the tests...
   // hence this might not include reference config related to the actually executing test
-  //todo: might be better NOT to pass any class loader and let typesafeConfig rely on the contextClassLoader
+  // todo: might be better NOT to pass any class loader and let typesafeConfig rely on the contextClassLoader
   // (which is usually the system class loader)
   def defaultReference: Config = ConfigFactory.defaultReference(getClass.getClassLoader)
 }
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala
index 082b10949b..1308d70fbf 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala
@@ -137,11 +137,11 @@ private[akka] final class BehaviorTestKitImpl[T](
     try {
       context.setCurrentActorThread()
       try {
-        //we need this to handle message adapters related messages
+        // we need this to handle message adapters related messages
         val intercepted = BehaviorTestKitImpl.Interceptor.inteceptBehaviour(current, context)
         currentUncanonical = Behavior.interpretMessage(intercepted, context, message)
-        //notice we pass current and not intercepted, this way Behaviors.same will be resolved to current which will be intercepted again on the next message
-        //otherwise we would have risked intercepting an already intercepted behavior (or would have had to explicitly check if the current behavior is already intercepted by us)
+        // notice we pass current and not intercepted, this way Behaviors.same will be resolved to current which will be intercepted again on the next message
+        // otherwise we would have risked intercepting an already intercepted behavior (or would have had to explicitly check if the current behavior is already intercepted by us)
         current = Behavior.canonicalize(currentUncanonical, current, context)
       } finally {
         context.clearCurrentActorThread()
@@ -201,8 +201,8 @@ private[akka] object BehaviorTestKitImpl {
     def inteceptBehaviour[T](behavior: Behavior[T], ctx: TypedActorContext[T]): Behavior[T] =
       Behavior
         .start(Behaviors.intercept { () =>
-          this.asInstanceOf[BehaviorInterceptor[Any, T]]
-        }(behavior), ctx.asInstanceOf[TypedActorContext[Any]])
+            this.asInstanceOf[BehaviorInterceptor[Any, T]]
+          }(behavior), ctx.asInstanceOf[TypedActorContext[Any]])
         .unsafeCast[T]
   }
 }
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala
index 9f8aaac38f..c92facae07 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala
@@ -166,12 +166,13 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
     val i = new BehaviorTestKitImpl[U](system, p, BehaviorImpl.ignore)
     _children += p.name -> i

-    new FunctionRef[U](p, (message, _) => {
-      val m = f(message);
-      if (m != null) {
-        selfInbox.ref ! m; i.selfInbox().ref ! message
-      }
-    })
+    new FunctionRef[U](p,
+      (message, _) => {
+        val m = f(message);
+        if (m != null) {
+          selfInbox.ref ! m; i.selfInbox().ref ! message
+        }
+      })
   }

   /**
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala
index ec599a9c51..941c4dfd0e 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala
@@ -134,8 +134,9 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_])
     val prevEnd = end
     end = start + maxDiff

-    val ret = try f
-    finally end = prevEnd
+    val ret =
+      try f
+      finally end = prevEnd

     val diff = now - start
     assert(min <= diff, s"block took ${diff.pretty}, should at least have been $min")
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala
index 09a7caf174..30ebaa4cef 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala
@@ -38,8 +38,8 @@ import akka.util.Timeout
  *
  * @Test
  * public void testBlah() throws Exception {
- *   // spawn actors etc using the testKit
- *   ActorRef ref = testKit.spawn(behavior);
+ * // spawn actors etc using the testKit
+ * ActorRef ref = testKit.spawn(behavior);
  * }
  * }
  * }}}
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala
index db20baf120..7bdd2c6771 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala
@@ -254,8 +254,8 @@ final class ActorTestKit private[akka] (
   def stop[T](ref: ActorRef[T], max: FiniteDuration = timeout.duration): Unit =
     try {
       Await.result(internalTestKitGuardian.ask { (x: ActorRef[ActorTestKitGuardian.Ack.type]) =>
-        ActorTestKitGuardian.StopActor(ref, x)
-      }(Timeout(max), scheduler), max)
+          ActorTestKitGuardian.StopActor(ref, x)
+        }(Timeout(max), scheduler), max)
     } catch {
       case _: TimeoutException =>
         assert(false, s"timeout ($max) during stop() waiting for actor [${ref.path}] to stop")
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala
index c67de6bedc..574914cf3e 100644
--- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala
+++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala
@@ -21,7 +21,6 @@ package object scaladsl {
    *
    * Uses the scaling factor from the `TestTimeFactor` in the [[TestKitSettings]]
    * (in implicit scope).
-   *
    */
   implicit class TestDuration(val duration: FiniteDuration) extends AnyVal {
     def dilated(implicit settings: TestKitSettings): FiniteDuration = settings.dilated(duration)
diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala
index ad70db5502..5248128e12 100644
--- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala
+++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala
@@ -82,9 +82,9 @@ class ActorTestKitSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike wi
     "spawn a named actor" in {
       val spawnedWithName = Promise[String]()
       spawn(Behaviors.setup[AnyRef] { context =>
-        spawnedWithName.trySuccess(context.self.path.name)
-        Behaviors.empty
-      }, "name")
+          spawnedWithName.trySuccess(context.self.path.name)
+          Behaviors.empty
+        }, "name")
       spawnedWithName.future.futureValue should ===("name")
     }

diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala
index c18d454e38..ee4827ec08 100644
--- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala
+++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala
@@ -88,8 +88,8 @@ object BehaviorTestKitSpec {
         Behaviors.same
       case SpawnAdapterWithName(name) =>
         context.spawnMessageAdapter({ (r: Reproduce) =>
-          SpawnAnonymous(r.times)
-        }, name)
+            SpawnAnonymous(r.times)
+          }, name)
         Behaviors.same
       case SpawnAndWatchUnwatch(name) =>
         val c = context.spawn(Child.initial, name)
@@ -420,11 +420,11 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing {
      testkit.run(ScheduleCommand("abc", 42.seconds, Effect.TimerScheduled.SingleMode, SpawnChild))
      testkit.expectEffectPF {
        case Effect.TimerScheduled(
-            "abc",
-            SpawnChild,
-            finiteDuration,
-            Effect.TimerScheduled.SingleMode,
-            false /*not overriding*/ ) =>
+              "abc",
+              SpawnChild,
+              finiteDuration,
+              Effect.TimerScheduled.SingleMode,
+              false /*not overriding*/ ) =>
          finiteDuration should equal(42.seconds)
      }
      testkit.run(IsTimerActive("abc", t.ref))
@@ -456,7 +456,7 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing {
      testkit.expectEffectPF {
        case Effect.Spawned(_, "child", _) =>
      }
-      //no effect since the timer's mode was single, hence removed after fired
+      // no effect since the timer's mode was single, hence removed after fired
      send()
      testkit.selfInbox().hasMessages should be(false)
    }
@@ -485,7 +485,7 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing {
      testkit.expectEffect {
        Effect.Stopped("child")
      }
-      //when scheduling with fixed rate the timer remains scheduled
+      // when scheduling with fixed rate the timer remains scheduled
      send()
      testkit.runOne()
      testkit.expectEffectPF {
diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala
index a9b1bb0789..ba496e0f9d 100644
--- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala
+++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala
@@ -67,11 +67,11 @@ class TestAppenderSpec
     "only filter events for given logger name" in {
       val count = new AtomicInteger
       LoggingTestKit
-        .custom({
+        .custom {
           case logEvent =>
             count.incrementAndGet()
             logEvent.message == "Hello from right logger" && logEvent.loggerName == classOf[AnotherLoggerClass].getName
-        })
+        }
         .withOccurrences(2)
         .withLoggerName(classOf[AnotherLoggerClass].getName)
         .expect {
diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala
index 44444a741d..48b76e5635 100644
--- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala
+++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala
@@ -22,9 +22,9 @@ class TestProbeSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with
       probe.fishForMessage(shortDuration) {
         case _ => FishingOutcomes.complete
       }
-      probe.awaitAssert({
+      probe.awaitAssert {
         "result"
-      })
+      }
       probe.expectMessageType[String]
       probe.expectMessage("whoa")
       probe.expectNoMessage()
diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala
index 7e61d0258e..2ff1dff96a 100644
--- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala
+++ b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala
@@ -27,7 +27,7 @@ import scala.util.Success
 import scala.util.Try

 object AsyncTestingExampleSpec {
-  //#under-test
+  // #under-test
   object Echo {
     case class Ping(message: String, response: ActorRef[Pong])
     case class Pong(message: String)
@@ -38,9 +38,9 @@ object AsyncTestingExampleSpec {
       Behaviors.same
     }
   }
-  //#under-test
+  // #under-test

-  //#under-test-2
+  // #under-test-2
   case class Message(i: Int, replyTo: ActorRef[Try[Int]])

   class Producer(publisher: ActorRef[Message])(implicit scheduler: Scheduler) {
@@ -54,7 +54,7 @@ object AsyncTestingExampleSpec {
     }
   }

-  //#under-test-2
+  // #under-test-2
 }

@@ -62,29 +62,29 @@ object AsyncTestingExampleSpec {
 class AsyncTestingExampleSpec
     extends AnyWordSpec
     with BeforeAndAfterAll
-    //#test-header
+    // #test-header
     with LogCapturing
-    //#test-header
+    // #test-header
     with Matchers {
   val testKit = ActorTestKit()
-  //#test-header
+  // #test-header

   import AsyncTestingExampleSpec._

   "A testkit" must {
     "support verifying a response" in {
-      //#test-spawn
+      // #test-spawn
       val pinger = testKit.spawn(Echo(), "ping")
       val probe = testKit.createTestProbe[Echo.Pong]()
       pinger ! Echo.Ping("hello", probe.ref)
       probe.expectMessage(Echo.Pong("hello"))
-      //#test-spawn
+      // #test-spawn
     }

     "support verifying a response - anonymous" in {
-      //#test-spawn-anonymous
+      // #test-spawn-anonymous
       val pinger = testKit.spawn(Echo())
-      //#test-spawn-anonymous
+      // #test-spawn-anonymous
       val probe = testKit.createTestProbe[Echo.Pong]()
       pinger ! Echo.Ping("hello", probe.ref)
       probe.expectMessage(Echo.Pong("hello"))
@@ -93,7 +93,7 @@ class AsyncTestingExampleSpec
     "be able to stop actors under test" in {
       // Will fail with 'name not unique' exception if the first actor is not fully stopped
       val probe = testKit.createTestProbe[Echo.Pong]()
-      //#test-stop-actors
+      // #test-stop-actors
       val pinger1 = testKit.spawn(Echo(), "pinger")
       pinger1 ! Echo.Ping("hello", probe.ref)
       probe.expectMessage(Echo.Pong("hello"))
@@ -104,12 +104,12 @@ class AsyncTestingExampleSpec
       pinger2 ! Echo.Ping("hello", probe.ref)
       probe.expectMessage(Echo.Pong("hello"))
       testKit.stop(pinger2, 10.seconds) // Custom timeout
-      //#test-stop-actors
+      // #test-stop-actors
     }

     "support observing mocked behavior" in {
-      //#test-observe-mocked-behavior
+      // #test-observe-mocked-behavior
       import testKit._

       // simulate the happy path
@@ -130,13 +130,13 @@ class AsyncTestingExampleSpec
         val msg = probe.expectMessageType[Message]
         msg.i shouldBe i
       }
-      //#test-observe-mocked-behavior
+      // #test-observe-mocked-behavior
     }
   }

-  //#test-shutdown
+  // #test-shutdown
   override def afterAll(): Unit = testKit.shutdownTestKit()
-  //#test-shutdown
+  // #test-shutdown
  //#test-header
 }
 //#test-header
diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala
index 2e3504e99f..24f9819d24 100644
--- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala
+++ b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala
@@ -43,7 +43,7 @@ class ManualTimerExampleSpec
     manualTime.expectNoMessageFor(10.seconds, probe)
   }

-  //#manual-scheduling-simple
+  // #manual-scheduling-simple
   "schedule repeated ticks" in {
     case object Tick

@@ -113,7 +113,7 @@ class ManualTimerExampleSpec
       probe.expectMessage(Tock(2))
     }

-    //#manual-scheduling-simple
+    // #manual-scheduling-simple
   }
 }
 //#manual-scheduling-simple
diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala
index 5d18f71373..b2977a12e1 100644
--- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala
+++ b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala
@@ -19,13 +19,13 @@ import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpec

 object SyncTestingExampleSpec {
-  //#child
+  // #child
   val childActor = Behaviors.receiveMessage[String] { _ =>
     Behaviors.same[String]
   }
-  //#child
+  // #child

-  //#under-test
+  // #under-test
   object Hello {
     sealed trait Command
     case object CreateAnonymousChild extends Command
@@ -58,7 +58,7 @@ object SyncTestingExampleSpec {
         who ! "hello"
         Behaviors.same
     }
-  //#under-test
+  // #under-test
 }

 object ConfigAware {
@@ -89,57 +89,57 @@ class SyncTestingExampleSpec extends AnyWordSpec with Matchers {

   "Typed actor synchronous testing" must {

     "record spawning" in {
-      //#test-child
+      // #test-child
       val testKit = BehaviorTestKit(Hello())
       testKit.run(Hello.CreateChild("child"))
       testKit.expectEffect(Spawned(childActor, "child"))
-      //#test-child
+      // #test-child
     }

     "record spawning anonymous" in {
-      //#test-anonymous-child
+      // #test-anonymous-child
       val testKit = BehaviorTestKit(Hello())
       testKit.run(Hello.CreateAnonymousChild)
       testKit.expectEffect(SpawnedAnonymous(childActor))
-      //#test-anonymous-child
+      // #test-anonymous-child
     }

     "record message sends" in {
-      //#test-message
+      // #test-message
       val testKit = BehaviorTestKit(Hello())
       val inbox = TestInbox[String]()
       testKit.run(Hello.SayHello(inbox.ref))
       inbox.expectMessage("hello")
-      //#test-message
+      // #test-message
     }

     "send a message to a spawned child" in {
-      //#test-child-message
+      // #test-child-message
       val testKit = BehaviorTestKit(Hello())
       testKit.run(Hello.SayHelloToChild("child"))
       val childInbox = testKit.childInbox[String]("child")
       childInbox.expectMessage("hello")
-      //#test-child-message
+      // #test-child-message
     }

     "send a message to an anonymous spawned child" in {
-      //#test-child-message-anonymous
+      // #test-child-message-anonymous
       val testKit = BehaviorTestKit(Hello())
       testKit.run(Hello.SayHelloToAnonymousChild)
       val child = testKit.expectEffectType[SpawnedAnonymous[String]]
       val childInbox = testKit.childInbox(child.ref)
       childInbox.expectMessage("hello stranger")
-      //#test-child-message-anonymous
+      // #test-child-message-anonymous
     }

     "log a message to the logger" in {
-      //#test-check-logging
+      // #test-check-logging
       val testKit = BehaviorTestKit(Hello())
       val inbox = TestInbox[String]("Inboxer")
       testKit.run(Hello.LogAndSayHello(inbox.ref))
       testKit.logEntries() shouldBe Seq(CapturedLogEvent(Level.INFO, "Saying hello to Inboxer"))
-      //#test-check-logging
+      // #test-check-logging
     }

     "has access to the provided config" in {
diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala
index cc3ac65994..52ea76c096 100644
--- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala
+++ b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala
@@ -8,24 +8,24 @@ object TestConfigExample {

   def illustrateApplicationConfig(): Unit = {

-    //#default-application-conf
+    // #default-application-conf
     import com.typesafe.config.ConfigFactory
     ConfigFactory.load()
-    //#default-application-conf
+    // #default-application-conf

-    //#parse-string
+    // #parse-string
     ConfigFactory.parseString("""
       akka.loglevel = DEBUG
       akka.log-config-on-start = on
       """)
-    //#parse-string
+    // #parse-string

-    //#fallback-application-conf
+    // #fallback-application-conf
     ConfigFactory.parseString("""
       akka.loglevel = DEBUG
       akka.log-config-on-start = on
       """).withFallback(ConfigFactory.load())
-    //#fallback-application-conf
+    // #fallback-application-conf
   }
 }
diff --git a/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala b/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala
index e064629472..b934a52168 100644
--- a/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala
@@ -18,11 +18,11 @@ class AkkaExceptionSpec extends AnyWordSpec with Matchers {

   "AkkaException" must {
     "have a AkkaException(String msg) constructor to be serialization friendly" in {
-      //if the call to this method completes, we know what there is at least a single constructor which has
-      //the expected argument type.
+      // if the call to this method completes, we know what there is at least a single constructor which has
+      // the expected argument type.
       verify(classOf[AkkaException])

-      //lets also try it for the exception that triggered this bug to be discovered.
+      // lets also try it for the exception that triggered this bug to be discovered.
       verify(classOf[ActorKilledException])
     }
   }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
index aa65bffa9c..90460eddd0 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala
@@ -169,10 +169,10 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS
             Thread.sleep(50)
             "po"
           }
-        // Here, we implicitly close over the actor instance and access the context
-        // when the flatMap thunk is run. Previously, the context was nulled when the actor
-        // was terminated. This isn't done any more. Still, the pattern of `import context.dispatcher`
-        // is discouraged as closing over `context` is unsafe in general.
+          // Here, we implicitly close over the actor instance and access the context
+          // when the flatMap thunk is run. Previously, the context was nulled when the actor
+          // was terminated. This isn't done any more. Still, the pattern of `import context.dispatcher`
+          // is discouraged as closing over `context` is unsafe in general.
           .flatMap(x => Future { x + "ng" } /* implicitly: (this.context.dispatcher) */ )
           .recover { case _: NullPointerException => "npe" }
           .pipeTo(replyTo)
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala
index 85f096124f..b6289d8aa5 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala
@@ -159,8 +159,8 @@ class ActorRefSpec extends AkkaSpec("""
       EventFilter[ActorInitializationException](occurrences = 1).intercept {
         intercept[akka.actor.ActorInitializationException] {
-          wrap(
-            result => actorOf(Props(promiseIntercept(new FailingOuterActor(actorOf(Props(new InnerActor))))(result))))
+          wrap(result =>
+            actorOf(Props(promiseIntercept(new FailingOuterActor(actorOf(Props(new InnerActor))))(result))))
         }

         contextStackMustBeEmpty()
       }
@@ -168,8 +168,8 @@ class ActorRefSpec extends AkkaSpec("""
       EventFilter[ActorInitializationException](occurrences = 1).intercept {
         intercept[akka.actor.ActorInitializationException] {
-          wrap(
-            result => actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result)))))))
+          wrap(result =>
+            actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result)))))))
         }

         contextStackMustBeEmpty()
       }
@@ -196,10 +196,9 @@ class ActorRefSpec extends AkkaSpec("""
       EventFilter[ActorInitializationException](occurrences = 2).intercept {
         intercept[akka.actor.ActorInitializationException] {
-          wrap(
-            result =>
-              actorOf(Props(new FailingInheritingOuterActor(
-                actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result)))))))
+          wrap(result =>
+            actorOf(Props(new FailingInheritingOuterActor(
+              actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result)))))))
         }

         contextStackMustBeEmpty()
       }
@@ -247,22 +246,21 @@ class ActorRefSpec extends AkkaSpec("""
       EventFilter[ActorInitializationException](occurrences = 1).intercept {
         intercept[akka.actor.ActorInitializationException] {
-          wrap(
-            result =>
-              actorOf(
-                Props(new OuterActor(actorOf(Props(promiseIntercept({ new InnerActor; new InnerActor })(result)))))))
+          wrap(result =>
+            actorOf(
+              Props(new OuterActor(actorOf(Props(promiseIntercept { new InnerActor; new InnerActor }(result)))))))
         }

         contextStackMustBeEmpty()
       }

       EventFilter[ActorInitializationException](occurrences = 1).intercept {
-        (intercept[java.lang.IllegalStateException] {
+        intercept[java.lang.IllegalStateException] {
           wrap(result =>
-            actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept({
+            actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept {
               throw new IllegalStateException("Ur state be b0rked")
-            })(result)))))))
-          }).getMessage should ===("Ur state be b0rked")
+            }(result)))))))
+          }.getMessage should ===("Ur state be b0rked")

         contextStackMustBeEmpty()
       }
@@ -272,9 +270,9 @@ class ActorRefSpec extends AkkaSpec("""
       EventFilter[ActorInitializationException](occurrences = 1, pattern = "/user/failingActor:").intercept {
         intercept[java.lang.IllegalStateException] {
           wrap(result =>
-            system.actorOf(Props(promiseIntercept({
-              throw new IllegalStateException
-            })(result)), "failingActor"))
+            system.actorOf(Props(promiseIntercept {
+              throw new IllegalStateException
+            }(result)), "failingActor"))
         }
       }
     }
@@ -325,9 +323,9 @@ class ActorRefSpec extends AkkaSpec("""

       val in = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray))

-      (intercept[java.lang.IllegalStateException] {
+      intercept[java.lang.IllegalStateException] {
         in.readObject
-      }).getMessage should ===(
+      }.getMessage should ===(
         "Trying to deserialize a serialized ActorRef without an ActorSystem in scope." +
         " Use 'akka.serialization.JavaSerializer.currentSystem.withValue(system) { ... }'")
     }
@@ -422,8 +420,8 @@ class ActorRefSpec extends AkkaSpec("""
         }
       }))

-      val ffive = (ref.ask(5)(timeout)).mapTo[String]
-      val fnull = (ref.ask(0)(timeout)).mapTo[String]
+      val ffive = ref.ask(5)(timeout).mapTo[String]
+      val fnull = ref.ask(0)(timeout).mapTo[String]
       ref ! PoisonPill

       Await.result(ffive, timeout.duration) should ===("five")
@@ -459,15 +457,15 @@ class ActorRefSpec extends AkkaSpec("""

     "be able to check for existence of children" in {
       val parent = system.actorOf(Props(new Actor {
-        val child = context.actorOf(Props(new Actor {
-          def receive = { case _ => }
-        }), "child")
+          val child = context.actorOf(Props(new Actor {
+            def receive = { case _ => }
+          }), "child")

-        def receive = { case name: String => sender() ! context.child(name).isDefined }
-      }), "parent")
+          def receive = { case name: String => sender() ! context.child(name).isDefined }
+        }), "parent")

-      assert(Await.result((parent ? "child"), timeout.duration) === true)
-      assert(Await.result((parent ? "whatnot"), timeout.duration) === false)
+      assert(Await.result(parent ? "child", timeout.duration) === true)
+      assert(Await.result(parent ? "whatnot", timeout.duration) === false)
     }
   }
 }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
index ec1fcfe7cf..b4edb3b036 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala
@@ -49,10 +49,11 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout {
   val root = sysImpl.lookupRoot

   def empty(path: String) =
-    new EmptyLocalActorRef(sysImpl.provider, path match {
-      case RelativeActorPath(elems) => sysImpl.lookupRoot.path / elems
-      case _ => throw new RuntimeException()
-    }, system.eventStream)
+    new EmptyLocalActorRef(sysImpl.provider,
+      path match {
+        case RelativeActorPath(elems) => sysImpl.lookupRoot.path / elems
+        case _ => throw new RuntimeException()
+      }, system.eventStream)

   val idProbe = TestProbe()

@@ -128,7 +129,7 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout {
       val a2 = system.actorOf(p, name)
       a2.path should ===(a1.path)
       a2.path.toString should ===(a1.path.toString)
-      a2 should not be (a1)
+      a2 should not be a1
       a2.toString should not be (a1.toString)

       watch(a2)
@@ -253,12 +254,12 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout {
       }
       def check(looker: ActorRef): Unit = {
         for ((l, r) <- Seq(
-               SelectString("a/b/c") -> None,
-               SelectString("akka://all-systems/Nobody") -> None,
-               SelectPath(system / "hallo") -> None,
-               SelectPath(looker.path.child("hallo")) -> None, // test Java API
-               SelectPath(looker.path.descendant(Seq("a", "b").asJava)) -> None) // test Java API
-             ) checkOne(looker, l, r)
+            SelectString("a/b/c") -> None,
+            SelectString("akka://all-systems/Nobody") -> None,
+            SelectPath(system / "hallo") -> None,
+            SelectPath(looker.path.child("hallo")) -> None, // test Java API
+            SelectPath(looker.path.descendant(Seq("a", "b").asJava)) -> None) // test Java API
+        ) checkOne(looker, l, r)
       }
       for (looker <- all) check(looker)
     }
@@ -289,8 +290,8 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout {
       implicit val sender = c1
       ActorSelection(c21, "../../*") ! GetSender(testActor)
       val actors = Set() ++ receiveWhile(messages = 2) {
-        case `c1` => lastSender
-      }
+          case `c1` => lastSender
+        }
       actors should ===(Set(c1, c2))
       expectNoMessage()
     }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
index af79807bab..d152a9121f 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
@@ -32,10 +32,10 @@ object ActorSystemSpec {
       case n: Int =>
         master = sender()
         terminaters = Set() ++ (for (_ <- 1 to n) yield {
-          val man = context.watch(context.system.actorOf(Props[Terminater]()))
-          man ! "run"
-          man
-        })
+            val man = context.watch(context.system.actorOf(Props[Terminater]()))
+            man ! "run"
+            man
+          })
       case Terminated(child) if terminaters contains child =>
         terminaters -= child
         if (terminaters.isEmpty) {
@@ -123,14 +123,14 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend

     "reject invalid names" in {
       for (n <- Seq(
-             "-hallowelt",
-             "_hallowelt",
-             "hallo*welt",
-             "hallo@welt",
-             "hallo#welt",
-             "hallo$welt",
-             "hallo%welt",
-             "hallo/welt")) intercept[IllegalArgumentException] {
+          "-hallowelt",
+          "_hallowelt",
+          "hallo*welt",
+          "hallo@welt",
+          "hallo#welt",
+          "hallo$welt",
+          "hallo%welt",
+          "hallo/welt")) intercept[IllegalArgumentException] {
         ActorSystem(n)
       }
     }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala
index 06dc9ebef5..630cd2e718 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala
@@ -24,7 +24,7 @@ class ActorTimeoutSpec extends AkkaSpec {
     "use implicitly supplied timeout" in {
       implicit val timeout = Timeout(testTimeout)
       val echo = system.actorOf(Props.empty)
-      val f = (echo ? "hallo")
+      val f = echo ? "hallo"
       intercept[AskTimeoutException] { Await.result(f, testTimeout + leeway) }
     }

diff --git a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala
index 9a35a0c353..e9956ec30e 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala
@@ -28,11 +28,11 @@ object ConsistencySpec {
       }
     }
   """
-  class CacheMisaligned(var value: Long, var padding1: Long, var padding2: Long, var padding3: Int) //Vars, no final fences
+  class CacheMisaligned(var value: Long, var padding1: Long, var padding2: Long, var padding3: Int) // Vars, no final fences

   class ConsistencyCheckingActor extends Actor {
-    var left = new CacheMisaligned(42, 0, 0, 0) //var
-    var right = new CacheMisaligned(0, 0, 0, 0) //var
+    var left = new CacheMisaligned(42, 0, 0, 0) // var
+    var right = new CacheMisaligned(0, 0, 0, 0) // var
     var lastStep = -1L
     def receive = {
       case step: Long =>
diff --git a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala
index 74596010dc..6778f3ddaa 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala
@@ -79,7 +79,8 @@ class CoordinatedShutdownSpec
       // a, b can be in any order
       result2.toSet should ===(Set("a", "b", "c"))

-      checkTopologicalSort(Map("b" -> phase("a"), "c" -> phase("b"), "d" -> phase("b", "c"), "e" -> phase("d"))) should ===(
+      checkTopologicalSort(Map("b" -> phase("a"), "c" -> phase("b"), "d" -> phase("b", "c"),
+        "e" -> phase("d"))) should ===(
         List("a", "b", "c", "d", "e"))

       val result3 =
@@ -324,13 +325,14 @@ class CoordinatedShutdownSpec
       val shouldBeCancelled = cancellables.zipWithIndex.collect {
         case (c, i) if i % 2 == 0 => c
       }
-      val cancelFutures = for {
-        _ <- cancellables
-        c <- shouldBeCancelled
-      } yield Future {
-        c.cancel() shouldBe true
-        Done
-      }
+      val cancelFutures =
+        for {
+          _ <- cancellables
+          c <- shouldBeCancelled
+        } yield Future {
+          c.cancel() shouldBe true
+          Done
+        }
       cancelFutures.foldLeft(Future.successful(Done)) {
         case (acc, fut) => acc.flatMap(_ => fut)
       }
@@ -785,7 +787,7 @@ class CoordinatedShutdownSpec
       withSystemRunning(newSystem, cs)

       TestKit.shutdownActorSystem(newSystem)
-      shutdownHooks should have size (0)
+      shutdownHooks should have size 0

     protected def myHooksCount: Int = synchronized(shutdownHooks.size)
   }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala
index b246cb32d1..02a74ddf9e 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala
@@ -39,8 +39,8 @@ object DeathWatchSpec {
     def receive = {
       case "NKOTB" =>
         val currentKid = context.watch(context.actorOf(Props(new Actor {
-          def receive = { case "NKOTB" => context.stop(self) }
-        }), "kid"))
+            def receive = { case "NKOTB" => context.stop(self) }
+          }), "kid"))
         currentKid.forward("NKOTB")
         context.become {
           case Terminated(`currentKid`) =>
@@ -171,7 +171,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout =>

       monitor2 ! "ping"

-      expectMsg("pong") //Needs to be here since watch and unwatch are asynchronous
+      expectMsg("pong") // Needs to be here since watch and unwatch are asynchronous

       terminal ! PoisonPill

diff --git a/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala
index 335603fb84..68e6844c3d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala
@@ -47,7 +47,8 @@ class DynamicAccessSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll
     }

     "try different constructors with recoverWith" in {
-      instantiateWithDefaultOrStringCtor("akka.actor.TestClassWithStringConstructor").get.name shouldBe "string ctor argument"
+      instantiateWithDefaultOrStringCtor(
+        "akka.actor.TestClassWithStringConstructor").get.name shouldBe "string ctor argument"
       instantiateWithDefaultOrStringCtor("akka.actor.TestClassWithDefaultConstructor").get.name shouldBe "default"
       instantiateWithDefaultOrStringCtor("akka.actor.foo.NonExistingClass") match {
         case Failure(t) =>
diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
index 61d32199e3..cd83c6073a 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala
@@ -44,7 +44,7 @@ object FSMActorSpec {
         soFar + digit match {
           case incomplete if incomplete.length < code.length =>
             stay().using(CodeState(incomplete, code))
-          case codeTry if (codeTry == code) => {
+          case codeTry if codeTry == code => {
             doUnlock()
             goto(Open).using(CodeState("", code)).forMax(timeout)
           }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala
index c2fe7623c2..1e0166979c 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala
@@ -17,7 +17,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender {
     "be able to become in its constructor" in {
       val a = system.actorOf(Props(new Becomer {
         context.become { case always => sender() ! always }
-        def receive = { case _      => sender() ! "FAILURE" }
+        def receive = { case _ => sender() ! "FAILURE" }
       }))
       a ! "pigdog"
       expectMsg("pigdog")
@@ -25,7 +25,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender {

     "be able to become multiple times in its constructor" in {
       val a = system.actorOf(Props(new Becomer {
-        for (i <- 1 to 4) context.become({ case always => sender() ! s"$i:$always" })
+        for (i <- 1 to 4) context.become { case always => sender() ! s"$i:$always" }
         def receive = { case _ => sender() ! "FAILURE" }
       }))
       a ! "pigdog"
@@ -35,7 +35,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender {
     "be able to become with stacking in its constructor" in {
       val a = system.actorOf(Props(new Becomer {
         context.become({ case always => sender() ! "pigdog:" + always; context.unbecome() }, false)
-        def receive = { case always  => sender() ! "badass:" + always }
+        def receive = { case always => sender() ! "badass:" + always }
       }))
       a ! "pigdog"
       expectMsg("pigdog:pigdog")
@@ -62,7 +62,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender {
       val a = system.actorOf(Props(new Actor {
         def receive = {
           case "init" => sender() ! "init"
-          case "swap" => context.become({ case x: String => context.sender() ! x })
+          case "swap" => context.become { case x: String => context.sender() ! x }
         }
       }))

@@ -78,10 +78,10 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender {
         def receive = {
           case "init" => sender() ! "init"
           case "swap" =>
-            context.become({
+            context.become {
               case "swapped" => sender() ! "swapped"
               case "revert" => context.unbecome()
-            })
+            }
         }
       }))

@@ -103,11 +103,11 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender {
         def receive = {
          case "state" => sender() ! "0"
          case "swap" =>
-            context.become({
+            context.become {
              case "state" => sender() ! "1"
              case "swapped" => sender() ! "swapped"
              case "crash" => throw new Exception("Crash (expected)!")
-            })
+            }
            sender() ! "swapped"
        }
      }))
diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala
index 5d75f5d525..ea052cae99 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala
@@ -113,10 +113,10 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi
       // the fields are cleared after the Terminated message has been sent,
       // so we need to check for a reasonable time after we receive it
       awaitAssert({
-        val childProps2 = child.asInstanceOf[LocalActorRef].underlying.props
-        childProps2 should not be theSameInstanceAs(childProps1)
-        (childProps2 should be).theSameInstanceAs(ActorCell.terminatedProps)
-      }, 1 second)
+          val childProps2 = child.asInstanceOf[LocalActorRef].underlying.props
+          childProps2 should not be theSameInstanceAs(childProps1)
+          (childProps2 should be).theSameInstanceAs(ActorCell.terminatedProps)
+        }, 1 second)
     }
   }

@@ -135,11 +135,11 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi
         for (_ <- 1 to 4) yield Future(system.actorOf(Props(new Actor { def receive = { case _ => } }), address))
       val set: Set[Any] = Set() ++ actors.map(a =>
-        Await.ready(a, timeout.duration).value match {
-          case Some(Success(_: ActorRef)) => 1
-          case Some(Failure(_: InvalidActorNameException)) => 2
-          case x => x
-        })
+          Await.ready(a, timeout.duration).value match {
+            case Some(Success(_: ActorRef)) => 1
+            case Some(Failure(_: InvalidActorNameException)) => 2
+            case x => x
+          })
       set should ===(Set[Any](1, 2))
     }
   }

diff --git a/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala
index 2c6a4bdb74..c6925a27f8 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala
@@ -50,7 +50,7 @@ class ProviderSelectionSpec extends AbstractSpec {

     "create a Custom ProviderSelection and set custom provider fqcn in Settings" in {
       val other = "other.ActorRefProvider"
-      val ps = ProviderSelection.Custom(other) //checked by dynamicAccess
+      val ps = ProviderSelection.Custom(other) // checked by dynamicAccess
       ps.fqcn shouldEqual "other.ActorRefProvider"
       ps.hasCluster shouldBe false
       settingsWith(other).ProviderClass shouldEqual ps.fqcn
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala
index e95ac596bc..09dbc51f03 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala
@@ -240,11 +240,12 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit
     "stop continuous scheduling if the task throws exception" taggedAs TimingTest in {
       EventFilter[Exception]("TEST", occurrences = 1).intercept {
         val count = new AtomicInteger(0)
-        collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis, () => {
-          val c = count.incrementAndGet()
-          testActor ! c
-          if (c == 3) throw new RuntimeException("TEST") with NoStackTrace
-        }))
+        collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis,
+          () => {
+            val c = count.incrementAndGet()
+            testActor ! c
+            if (c == 3) throw new RuntimeException("TEST") with NoStackTrace
+          }))
         expectMsg(1)
         expectMsg(2)
         expectMsg(3)
@@ -256,24 +257,26 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit
       // when first throws
       EventFilter[Exception]("TEST-1", occurrences = 1).intercept {
         val count1 = new AtomicInteger(0)
-        collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis, () => {
-          val c = count1.incrementAndGet()
-          if (c == 1)
-            throw new IllegalStateException("TEST-1") with NoStackTrace
-          else
-            testActor ! c
-        }))
+        collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis,
+          () => {
+            val c = count1.incrementAndGet()
+            if (c == 1)
+              throw new IllegalStateException("TEST-1") with NoStackTrace
+            else
+              testActor ! c
+          }))
         expectNoMessage(200.millis)
       }

       // when later
       EventFilter[Exception]("TEST-3", occurrences = 1).intercept {
         val count2 = new AtomicInteger(0)
-        collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis, () => {
-          val c = count2.incrementAndGet()
-          testActor ! c
-          if (c == 3) throw new IllegalStateException("TEST-3") with NoStackTrace
-        }))
+        collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis,
+          () => {
+            val c = count2.incrementAndGet()
+            testActor ! c
+            if (c == 3) throw new IllegalStateException("TEST-3") with NoStackTrace
+          }))
         expectMsg(1)
         expectMsg(2)
         expectMsg(3)
@@ -286,9 +289,10 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit

       val initialDelay = 200.millis.dilated
       val delay = 10.millis.dilated
-      val timeout = collectCancellable(scheduleAdapter.schedule(initialDelay, delay, () => {
-        ticks.incrementAndGet()
-      }))
+      val timeout = collectCancellable(scheduleAdapter.schedule(initialDelay, delay,
+        () => {
+          ticks.incrementAndGet()
+        }))
       Thread.sleep(10.millis.dilated.toMillis)
       timeout.cancel()
       Thread.sleep((initialDelay + 100.millis.dilated).toMillis)
@@ -301,9 +305,10 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit

       val initialDelay = 90.millis.dilated
       val delay = 500.millis.dilated
-      val timeout = collectCancellable(scheduleAdapter.schedule(initialDelay, delay, () => {
-        ticks.incrementAndGet()
-      }))
+      val timeout = collectCancellable(scheduleAdapter.schedule(initialDelay, delay,
+        () => {
+          ticks.incrementAndGet()
+        }))
       Thread.sleep((initialDelay + 200.millis.dilated).toMillis)
       timeout.cancel()
       Thread.sleep((delay + 100.millis.dilated).toMillis)
@@ -473,9 +478,10 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev
       val counter = new AtomicInteger
       val terminated = Future {
         var rounds = 0
-        while (Try(sched.scheduleOnce(Duration.Zero, new Scheduler.TaskRunOnClose {
-          override def run(): Unit = ()
-        })(localEC)).isSuccess) {
+        while (Try(sched.scheduleOnce(Duration.Zero,
+            new Scheduler.TaskRunOnClose {
+              override def run(): Unit = ()
+            })(localEC)).isSuccess) {
           Thread.sleep(1)
           driver.wakeUp(step)
           rounds += 1
@@ -485,9 +491,10 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev
       def delay = if (ThreadLocalRandom.current.nextBoolean) step * 2 else step
       val N = 1000000
       (1 to N).foreach(_ =>
-        sched.scheduleOnce(delay, new Scheduler.TaskRunOnClose {
-          override def run(): Unit = counter.incrementAndGet()
-        }))
+        sched.scheduleOnce(delay,
+          new Scheduler.TaskRunOnClose {
+            override def run(): Unit = counter.incrementAndGet()
+          }))
       sched.close()
       Await.result(terminated, 3.seconds.dilated) should be > 10
       awaitAssert(counter.get should ===(N))
@@ -614,9 +621,10 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev
       var overrun = headroom
       val cap = 1000000
       val (success, failure) = Iterator
-        .continually(Try(sched.scheduleOnce(100.millis, new Scheduler.TaskRunOnClose {
-          override def run(): Unit = counter.incrementAndGet()
-        })))
+        .continually(Try(sched.scheduleOnce(100.millis,
+          new Scheduler.TaskRunOnClose {
+            override def run(): Unit = counter.incrementAndGet()
+          })))
         .take(cap)
         .takeWhile(_.isSuccess || { overrun -= 1; overrun >= 0 })
         .partition(_.isSuccess)
@@ -632,9 +640,10 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev
       import system.dispatcher
       val counter = new AtomicInteger()
       sched.scheduleOnce(10.seconds)(counter.incrementAndGet())
-      sched.scheduleOnce(10.seconds, new Scheduler.TaskRunOnClose {
-        override def run(): Unit = counter.incrementAndGet()
-      })
+      sched.scheduleOnce(10.seconds,
+        new Scheduler.TaskRunOnClose {
+          override def run(): Unit = counter.incrementAndGet()
+        })
       driver.close()
       sched.close()
       counter.get should ===(1)
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
index 8d2c1a01c0..433b81a4c3 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala
@@ -170,7 +170,7 @@ object SupervisorHierarchySpec {
       val sizes = s / kids
       var rest = s % kids
       val propsTemplate = Props.empty.withDispatcher("hierarchy")
-      (1 to kids).iterator.map { (id) =>
+      (1 to kids).iterator.map { id =>
         val kidSize =
           if (rest > 0) { rest -= 1; sizes + 1 } else sizes

@@ -821,14 +821,15 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w

     "suspend children while failing" taggedAs LongRunningTest in {
       val latch = TestLatch()
-      val slowResumer = system.actorOf(Props(new Actor {
-        override def supervisorStrategy = OneForOneStrategy() {
-          case _ => Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume
-        }
-        def receive = {
-          case "spawn" => sender() ! context.actorOf(Props[Resumer]())
-        }
-      }), "slowResumer")
+      val slowResumer = system.actorOf(
+        Props(new Actor {
+          override def supervisorStrategy = OneForOneStrategy() {
+            case _ => Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume
+          }
+          def receive = {
+            case "spawn" => sender() ! context.actorOf(Props[Resumer]())
+          }
+        }), "slowResumer")
       slowResumer ! "spawn"
       val boss = expectMsgType[ActorRef]
       boss ! "spawn"
@@ -867,24 +868,24 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w
        }

        val child = context.actorOf(Props(new Actor {
-          val ca = createAttempt.incrementAndGet()
+            val ca = createAttempt.incrementAndGet()

-          if (ca <= 6 && ca % 3 == 0)
-            context.actorOf(Props(new Actor { override def receive = { case _ => } }), "workingChild")
+            if (ca <= 6 && ca % 3 == 0)
+              context.actorOf(Props(new Actor { override def receive = { case _ => } }), "workingChild")

-          if (ca < 6) {
-            throw new IllegalArgumentException("OH NO!")
-          }
-          override def preStart() = {
-            preStartCalled.incrementAndGet()
-          }
-          override def postRestart(reason: Throwable) = {
-            postRestartCalled.incrementAndGet()
-          }
-          override def receive = {
-            case m => sender() ! m
-          }
-        }), "failChild")
+            if (ca < 6) {
+              throw new IllegalArgumentException("OH NO!")
+            }
+            override def preStart() = {
+              preStartCalled.incrementAndGet()
+            }
+            override def postRestart(reason: Throwable) = {
+              postRestartCalled.incrementAndGet()
+            }
+            override def receive = {
+              case m => sender() ! m
+            }
+          }), "failChild")

        override def receive = {
          case m => child.forward(m)
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala
index 7f121ba627..c99e4ce3a8 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala
@@ -197,7 +197,7 @@ class SupervisorSpec

   def kill(pingPongActor: ActorRef) = {
     val result = pingPongActor.?(DieReply)(DilatedTimeout)
-    expectMsg(Timeout, ExceptionMessage) //this is sent from PingPongActor's postRestart()
+    expectMsg(Timeout, ExceptionMessage) // this is sent from PingPongActor's postRestart()
     intercept[RuntimeException] { Await.result(result, DilatedTimeout) }
   }

@@ -218,7 +218,7 @@ class SupervisorSpec
     }

     "restart properly when same instance is returned" in {
-      val restarts = 3 //max number of restarts
+      val restarts = 3 // max number of restarts
       lazy val childInstance = new Actor {
         var preRestarts = 0
         var postRestarts = 0
@@ -439,17 +439,17 @@ class SupervisorSpec

     "not lose system messages when a NonFatal exception occurs when processing a system message" in {
       val parent = system.actorOf(Props(new Actor {
-        override val supervisorStrategy = OneForOneStrategy()({
+        override val supervisorStrategy = OneForOneStrategy() {
           case e: IllegalStateException if e.getMessage == "OHNOES" => throw e
           case _ => SupervisorStrategy.Restart
-        })
+        }
         val child = context.watch(context.actorOf(Props(new Actor {
-          override def postRestart(reason: Throwable): Unit = testActor ! "child restarted"
-          def receive = {
-            case l: TestLatch => { Await.ready(l, 5 seconds); throw new IllegalStateException("OHNOES") }
-            case "test" => sender() ! "child green"
-          }
-        }), "child"))
+            override def postRestart(reason: Throwable): Unit = testActor ! "child restarted"
+            def receive = {
+              case l: TestLatch => { Await.ready(l, 5 seconds); throw new IllegalStateException("OHNOES") }
+              case "test" => sender() ! "child green"
+            }
+          }), "child"))

         override def postRestart(reason: Throwable): Unit = testActor ! "parent restarted"

@@ -559,7 +559,7 @@ class SupervisorSpec

       val pingpong = child(supervisor, Props(new PingPongActor(testActor)))

-      //impossible to confirm if the restart window is infinite, so making sure maxNrOfRetries is respected correctly
+      // impossible to confirm if the restart window is infinite, so making sure maxNrOfRetries is respected correctly
       kill(pingpong)
       kill(pingpong)
       kill(pingpong)
diff --git a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala
index 409bd3a5eb..267807e8e2 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala
@@ -21,7 +21,7 @@ class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender

   // TODO: does this really make sense?
   override def atStartup(): Unit = {
-    Thread.interrupted() //remove interrupted status.
+    Thread.interrupted() // remove interrupted status.
   }

   "A supervised actor with lifecycle PERMANENT" should {
diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
index e1d7f2c276..ec1579157d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
@@ -177,7 +177,7 @@ object TypedActorSpec {
   }

   class StackedImpl extends Stacked {
-    override def stacked: String = "FOOBAR" //Uppercase
+    override def stacked: String = "FOOBAR" // Uppercase
   }

   trait LifeCycles {
@@ -410,18 +410,18 @@ class TypedActorSpec

       t.incr()
       t.failingPigdog()
-      t.read() should ===(1) //Make sure state is not reset after failure
+      t.read() should ===(1) // Make sure state is not reset after failure

       intercept[IllegalStateException] { Await.result(t.failingFuturePigdog(), 2 seconds) }.getMessage should ===(
         "expected")
-      t.read() should ===(1) //Make sure state is not reset after failure
+      t.read() should ===(1) // Make sure state is not reset after failure

       intercept[IllegalStateException] { t.failingJOptionPigdog() }.getMessage should ===("expected")
-      t.read() should ===(1) //Make sure state is not reset after failure
+      t.read() should ===(1) // Make sure state is not reset after failure

       intercept[IllegalStateException] { t.failingOptionPigdog() }.getMessage should ===("expected")
-      t.read() should ===(1) //Make sure state is not reset after failure
+      t.read() should ===(1) // Make sure state is not reset after failure

       mustStop(t)
     }

@@ -559,14 +559,14 @@ class TypedActorSpec
         t.crash()
       }

-      //Sneak in a check for the Receiver override
+      // Sneak in a check for the Receiver override
       val ref = ta.getActorRefFor(t)

       ref.tell("pigdog", testActor)

       expectMsg(timeout.duration, "dogpig")

-      //Done with that now
+      // Done with that now
       ta.poisonPill(t)
       latch.await(10, TimeUnit.SECONDS) should ===(true)
     }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
index 3e02ea7578..27cbb90582 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala
@@ -25,7 +25,7 @@ object UidClashTest {
       val eventStream: EventStream)
       extends MinimalActorRef {

-    //Ignore everything
+    // Ignore everything
     override def isTerminated: Boolean = true
     override def sendSystemMessage(message: SystemMessage): Unit = ()
     override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = ()
diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
index ae6f74f5fc..6f2870a0b0 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala
@@ -261,9 +261,9 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa

   def awaitStarted(ref: ActorRef): Unit = {
     awaitCond(ref match {
-      case r: RepointableRef => r.isStarted
-      case _ => true
-    }, 1 second, 10 millis)
+        case r: RepointableRef => r.isStarted
+        case _ => true
+      }, 1 second, 10 millis)
   }

   protected def interceptedDispatcher(): MessageDispatcherInterceptor
@@ -647,7 +647,7 @@ class BalancingDispatcherModelSpec extends ActorModelSpec(BalancingDispatcherMod
       system.stop(a)
       system.stop(b)

-      while (!a.isTerminated && !b.isTerminated) {} //Busy wait for termination
+      while (!a.isTerminated && !b.isTerminated) {} // Busy wait for termination

       assertRefDefaultZero(a)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1)
       assertRefDefaultZero(b)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1)
diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala
index 4e50b7a36c..72dfa0d39f 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala
@@ -175,7 +175,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
     }

     "get the correct types of dispatchers" in {
-      //All created/obtained dispatchers are of the expected type/instance
+      // All created/obtained dispatchers are of the expected type/instance
       assert(typesAndValidators.forall(tuple => tuple._2(allDispatchers(tuple._1))))
     }

diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala
index 4261ec2ed1..112410e248 100644
--- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala
@@ -89,7 +89,7 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin
       {
         val c = config.getConfig("akka.actor.default-dispatcher")

-        //General dispatcher config
+        // General dispatcher config

         {
           c.getString("type") should ===("Dispatcher")
@@ -100,13 +100,13 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin
           c.getBoolean("attempt-teamwork") should ===(true)
         }

-        //Default executor config
+        // Default executor config
        {
          val pool = c.getConfig("default-executor")
          pool.getString("fallback") should ===("fork-join-executor")
        }

-        //Fork join executor config
+        // Fork join executor config
        {
          val pool = c.getConfig("fork-join-executor")

@@ -116,7 +116,7 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin
           pool.getString("task-peeking-mode") should be("FIFO")
         }

-        //Thread pool executor config
+        // Thread pool executor config

         {
           val pool = c.getConfig("thread-pool-executor")
diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala
index 2e454241db..ec140d906c 100644
--- a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala
@@ -27,16 +27,16 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout {
       val es = Executors.newCachedThreadPool()
       try {
         val executor: Executor with ExecutionContext = ExecutionContext.fromExecutor(es)
-        executor should not be (null)
+        executor should not be null

         val executorService: ExecutorService with ExecutionContext = ExecutionContext.fromExecutorService(es)
-        executorService should not be (null)
+        executorService should not be null

         val jExecutor: ExecutionContextExecutor = ExecutionContext.fromExecutor(es)
-        jExecutor should not be (null)
+        jExecutor should not be null

         val jExecutorService: ExecutionContextExecutorService = ExecutionContexts.fromExecutorService(es)
-        jExecutorService should not be (null)
+        jExecutorService should not be null
       } finally {
         es.shutdown
       }
@@ -60,7 +60,7 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout {
           (1 to 100).foreach { _ =>
             batchable {
               if (callingThreadLock.get != 0) p.tryFailure(new IllegalStateException("Batch was executed inline!"))
-              else if (count.incrementAndGet == 100) p.trySuccess(()) //Done
+              else if (count.incrementAndGet == 100) p.trySuccess(()) // Done
               else if (lock.compareAndSet(0, 1)) {
                 try Thread.sleep(10)
                 finally lock.compareAndSet(1, 0)
diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
index 3cee62d6c6..a5a6a1182c 100644
--- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala
@@ -82,7 +82,7 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn
     }
   }

-  //CANDIDATE FOR TESTKIT
+  // CANDIDATE FOR TESTKIT
   def spawn[T <: AnyRef](fun: => T): Future[T] = Future(fun)(ExecutionContext.global)

   def createMessageInvocation(msg: Any): Envelope = Envelope(msg, system.deadLetters, system)
@@ -138,7 +138,7 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn
     val q = factory(config)
     ensureInitialMailboxState(config, q)

-    EventFilter.warning(pattern = "received dead letter", occurrences = (enqueueN - dequeueN)).intercept {
+    EventFilter.warning(pattern = "received dead letter", occurrences = enqueueN - dequeueN).intercept {
       def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn {
         val messages = Vector() ++ (for (i <- fromNum to toNum) yield createMessageInvocation(i))

@@ -171,13 +171,13 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn
     val ps = producers.map(Await.result(_, remainingOrDefault))
     val cs = consumers.map(Await.result(_, remainingOrDefault))

-    ps.map(_.size).sum should ===(enqueueN) //Must have produced 1000 messages
-    cs.map(_.size).sum should ===(dequeueN) //Must have consumed all produced messages
-    //No message is allowed to be consumed by more than one consumer
+    ps.map(_.size).sum should ===(enqueueN) // Must have produced 1000 messages
+    cs.map(_.size).sum should ===(dequeueN) // Must have consumed all produced messages
+    // No message is allowed to be consumed by more than one consumer
     cs.flatten.distinct.size should ===(dequeueN)
-    //All consumed messages should have been produced
+    // All consumed messages should have been produced
     cs.flatten.diff(ps.flatten).size should ===(0)
-    //The ones that were produced and not consumed
+    // The ones that were produced and not consumed
     ps.flatten.diff(cs.flatten).size should
===(enqueueN - dequeueN) } } @@ -248,9 +248,9 @@ class CustomMailboxSpec extends AkkaSpec(CustomMailboxSpec.config) { "support custom mailboxType" in { val actor = system.actorOf(Props.empty.withDispatcher("my-dispatcher")) awaitCond(actor match { - case r: RepointableRef => r.isStarted - case _ => true - }, 1 second, 10 millis) + case r: RepointableRef => r.isStarted + case _ => true + }, 1 second, 10 millis) val queue = actor.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].mailbox.messageQueue queue.getClass should ===(classOf[CustomMailboxSpec.MyMailbox]) } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala index df3f051029..1f1b7f54b8 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -28,17 +28,17 @@ object PriorityDispatcherSpec { class Unbounded(@unused settings: ActorSystem.Settings, @unused config: Config) extends UnboundedPriorityMailbox(PriorityGenerator({ - case i: Int => i //Reverse order + case i: Int => i // Reverse order case Result => Int.MaxValue case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser }: Any => Int)) class Bounded(@unused settings: ActorSystem.Settings, @unused config: Config) extends BoundedPriorityMailbox(PriorityGenerator({ - case i: Int => i //Reverse order - case Result => Int.MaxValue - case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser - }: Any => Int), 1000, 10 seconds) + case i: Int => i // Reverse order + case Result => Int.MaxValue + case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser + }: Any => Int), 1000, 10 seconds) } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala index 2a97f4381d..52615a6b0c 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala @@ -35,11 +35,11 @@ object StablePriorityDispatcherSpec { class Bounded(@unused settings: ActorSystem.Settings, @unused config: Config) extends BoundedStablePriorityMailbox(PriorityGenerator({ - case i: Int if i <= 100 => i // Small integers have high priority - case _: Int => 101 // Don't care for other integers - case Result => Int.MaxValue - case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser - }: Any => Int), 1000, 10 seconds) + case i: Int if i <= 100 => i // Small integers have high priority + case _: Int => 101 // Don't care for other integers + case Result => Int.MaxValue + case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser + }: Any => Int), 1000, 10 seconds) } diff --git a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala index 18d2c7d8fb..fb3434a164 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala @@ -308,7 +308,7 @@ class ScanningEventBusSpec extends EventBusSpec("ScanningEventBus") { def createNewEventBus(): BusType = new MyScanningEventBus - def createEvents(numberOfEvents: Int) = (0 until numberOfEvents) + def createEvents(numberOfEvents: Int) = 0 until numberOfEvents def 
createSubscriber(pipeTo: ActorRef) = new Procedure[Int] { def apply(i: Int) = pipeTo ! i } @@ -339,7 +339,7 @@ class LookupEventBusSpec extends EventBusSpec("LookupEventBus") { def createNewEventBus(): BusType = new MyLookupEventBus - def createEvents(numberOfEvents: Int) = (0 until numberOfEvents) + def createEvents(numberOfEvents: Int) = 0 until numberOfEvents def createSubscriber(pipeTo: ActorRef) = new Procedure[Int] { def apply(i: Int) = pipeTo ! i } diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index 9547631271..5cac228b3e 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -74,10 +74,10 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { "An EventStream" must { "manage subscriptions" in { - //#event-bus-start-unsubscriber-scala + // #event-bus-start-unsubscriber-scala val bus = new EventStream(system, true) bus.startUnsubscriber() - //#event-bus-start-unsubscriber-scala + // #event-bus-start-unsubscriber-scala bus.subscribe(testActor, classOf[M]) bus.publish(M(42)) @@ -294,8 +294,8 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { val tm = new A val target = sys.actorOf(Props(new Actor { - def receive = { case in => a1.ref.forward(in) } - }), "to-be-killed") + def receive = { case in => a1.ref.forward(in) } + }), "to-be-killed") es.subscribe(a2.ref, classOf[Any]) es.subscribe(target, classOf[A]) should ===(true) @@ -322,8 +322,8 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { val probe = TestProbe() val terminated = system.actorOf(Props(new Actor { - def receive = { case _ => } - }), "to-be-killed") + def receive = { case _ => } + }), "to-be-killed") watch(terminated) terminated ! PoisonPill diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala index ff488701b4..054307318b 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala @@ -96,7 +96,7 @@ object LoggerSpec { sender() ! LoggerInitialized case SetTarget(ref, `qualifier`) => target = Some(ref) - ref ! ("OK") + ref ! "OK" case event: LogEvent if !event.mdc.isEmpty => print(event) target.foreach { _ ! event } @@ -173,7 +173,7 @@ class LoggerSpec extends AnyWordSpec with Matchers { "log messages to standard output" in { val out = createSystemAndLogToBuffer("defaultLogger", defaultConfig, true) - out.size should be > (0) + out.size should be > 0 } "drain logger queue on system.terminate" in { diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala index 37fa06eab8..f62b277cb3 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala @@ -68,9 +68,10 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { system.eventStream.subscribe(testActor, classOf[UnhandledMessage]) val a = system.actorOf(Props(new Actor { def receive = - new LoggingReceive(Some("funky"), { - case null => - }) + new LoggingReceive(Some("funky"), + { + case null => + }) })) a ! 
"hallo" expectMsg( diff --git a/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala b/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala index 937aa460f8..3c9c2be932 100644 --- a/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala @@ -58,9 +58,9 @@ class JavaLoggerSpec extends AkkaSpec(JavaLoggerSpec.config) { val record = expectMsgType[logging.LogRecord] - record should not be (null) - record.getMillis should not be (0) - record.getThreadID should not be (0) + record should not be null + record.getMillis should not be 0 + record.getThreadID should not be 0 record.getLevel should ===(logging.Level.SEVERE) record.getMessage should ===("Simulated error") record.getThrown.getClass should ===(classOf[JavaLoggerSpec.SimulatedExc]) @@ -73,9 +73,9 @@ class JavaLoggerSpec extends AkkaSpec(JavaLoggerSpec.config) { val record = expectMsgType[logging.LogRecord] - record should not be (null) - record.getMillis should not be (0) - record.getThreadID should not be (0) + record should not be null + record.getMillis should not be 0 + record.getThreadID should not be 0 record.getLevel should ===(logging.Level.INFO) record.getMessage should ===("3 is the magic number") record.getThrown should ===(null) diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala index 754c5e0b1d..c92285bd1b 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala @@ -380,32 +380,33 @@ class TcpConnectionSpec extends AkkaSpec(""" override implicit lazy val system: ActorSystem = ActorSystem("respectPullModeTest", config) try run { - val maxBufferSize = 1 * 1024 - val ts = "t" * maxBufferSize - val us = "u" * (maxBufferSize / 2) + val maxBufferSize = 1 * 1024 + val ts = "t" * maxBufferSize + val us = "u" * (maxBufferSize / 2) - // send a batch that is bigger than the default buffer to make sure we don't recurse and - // send more than one Received messages - serverSideChannel.write(ByteBuffer.wrap((ts ++ us).getBytes("ASCII"))) - connectionHandler.expectNoMessage(100.millis) + // send a batch that is bigger than the default buffer to make sure we don't recurse and + // send more than one Received messages + serverSideChannel.write(ByteBuffer.wrap((ts ++ us).getBytes("ASCII"))) + connectionHandler.expectNoMessage(100.millis) - connectionActor ! ResumeReading - connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(ts) + connectionActor ! ResumeReading + connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(ts) - connectionHandler.expectNoMessage(100.millis) + connectionHandler.expectNoMessage(100.millis) - connectionActor ! ResumeReading - connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(us) + connectionActor ! ResumeReading + connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(us) - connectionHandler.expectNoMessage(100.millis) + connectionHandler.expectNoMessage(100.millis) - val vs = "v" * (maxBufferSize / 2) - serverSideChannel.write(ByteBuffer.wrap(vs.getBytes("ASCII"))) + val vs = "v" * (maxBufferSize / 2) + serverSideChannel.write(ByteBuffer.wrap(vs.getBytes("ASCII"))) - connectionActor ! ResumeReading + connectionActor ! 
ResumeReading - connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(vs) - } finally shutdown(system) + connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(vs) + } + finally shutdown(system) } "close the connection and reply with `Closed` upon reception of a `Close` command" in @@ -653,7 +654,7 @@ class TcpConnectionSpec extends AkkaSpec(""" override lazy val connectionActor = createConnectionActor(serverAddress = UnboundAddress, timeout = Option(100.millis)) run { - connectionActor.toString should not be ("") + connectionActor.toString should not be "" userHandler.expectMsg(CommandFailed(Connect(UnboundAddress, timeout = Option(100.millis)))) watch(connectionActor) expectTerminated(connectionActor) @@ -982,7 +983,7 @@ class TcpConnectionSpec extends AkkaSpec(""" override def run(body: => Unit): Unit = super.run { try { serverSideChannel.configureBlocking(false) - serverSideChannel should not be (null) + serverSideChannel should not be null interestCallReceiver.expectMsg(OP_CONNECT) selector.send(connectionActor, ChannelConnectable) diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala index 52a6ba33cd..436d1b9353 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala @@ -50,7 +50,7 @@ trait TcpIntegrationSpecSupport { this: AkkaSpec => connectCommander.sender() ! Register(clientHandler.ref) bindHandler.expectMsgType[Connected] match { - case Connected(`localAddress`, `endpoint`) => //ok + case Connected(`localAddress`, `endpoint`) => // ok case other => fail(s"No match: ${other}") } val serverHandler = TestProbe() diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala index cb312eb794..0e3b844020 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala @@ -116,7 +116,7 @@ class AsyncDnsResolverIntegrationSpec val answer = (IO(Dns) ? 
DnsProtocol.Resolve(name)).mapTo[DnsProtocol.Resolved].futureValue answer.name shouldEqual name answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set("a-single.bar.example") - answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.2.20")) + answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.2.20")) } "resolve internal CNAME record" in { @@ -124,7 +124,7 @@ class AsyncDnsResolverIntegrationSpec val answer = resolve(name) answer.name shouldEqual name answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set("a-double.foo.test") - answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set( + answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set( InetAddress.getByName("192.168.1.21"), InetAddress.getByName("192.168.1.22")) } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala index 102ac6d886..e3649a0415 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala @@ -227,8 +227,9 @@ class AsyncDnsResolverSpec extends AkkaSpec(""" def resolver(clients: List[ActorRef], config: Config): ActorRef = { val settings = new DnsSettings(system.asInstanceOf[ExtendedActorSystem], config) - system.actorOf(Props(new AsyncDnsResolver(settings, new SimpleDnsCache(), (_, _) => { - clients - }))) + system.actorOf(Props(new AsyncDnsResolver(settings, new SimpleDnsCache(), + (_, _) => { + clients + }))) } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala index 8c0b79cca5..8eca542fc4 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala @@ -133,8 +133,8 @@ class AskSpec extends AkkaSpec(""" system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) val echo = system.actorOf(Props(new Actor { - def receive = { case x => context.actorSelection(sender().path) ! x } - }), "select-echo2") + def receive = { case x => context.actorSelection(sender().path) ! x } + }), "select-echo2") val f = echo ? "hi" Await.result(f, 1 seconds) should ===("hi") @@ -164,13 +164,13 @@ class AskSpec extends AkkaSpec(""" system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) val echo = system.actorOf(Props(new Actor { - def receive = { - case x => - val name = sender().path.name - val parent = sender().path.parent - context.actorSelection(parent / ".." / "temp" / name) ! x - } - }), "select-echo4") + def receive = { + case x => + val name = sender().path.name + val parent = sender().path.parent + context.actorSelection(parent / ".." / "temp" / name) ! x + } + }), "select-echo4") val f = echo ? "hi" intercept[AskTimeoutException] { Await.result(f, 1 seconds) should ===("hi") @@ -185,12 +185,12 @@ class AskSpec extends AkkaSpec(""" system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) val echo = system.actorOf(Props(new Actor { - def receive = { - case x => - val parent = sender().path.parent - context.actorSelection(parent / "missing") ! x - } - }), "select-echo5") + def receive = { + case x => + val parent = sender().path.parent + context.actorSelection(parent / "missing") ! 
x + } + }), "select-echo5") val f = echo ? "hi" intercept[AskTimeoutException] { Await.result(f, 1 seconds) should ===("hi") @@ -204,11 +204,11 @@ class AskSpec extends AkkaSpec(""" system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) val echo = system.actorOf(Props(new Actor { - def receive = { - case x => - context.actorSelection(sender().path / "missing") ! x - } - }), "select-echo6") + def receive = { + case x => + context.actorSelection(sender().path / "missing") ! x + } + }), "select-echo6") val f = echo ? "hi" intercept[AskTimeoutException] { Await.result(f, 1 seconds) should ===(ActorSelectionMessage("hi", Vector(SelectChildName("missing")), false)) @@ -242,10 +242,10 @@ class AskSpec extends AkkaSpec(""" val p = TestProbe() val act = system.actorOf(Props(new Actor { - def receive = { - case msg => p.ref ! sender() -> msg - } - }), "myName") + def receive = { + case msg => p.ref ! sender() -> msg + } + }), "myName") (act ? "ask").mapTo[String] val (promiseActorRef, "ask") = p.expectMsgType[(ActorRef, String)] diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala index 21ab9c2487..1edf230500 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala @@ -273,9 +273,9 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec(""" // note that the message could be lost (dead letters) because ended up with previous crashed child probe.awaitAssert({ - supervisor ! "PING" - probe.expectMsg("PING") - }, 1.second) + supervisor ! "PING" + probe.expectMsg("PING") + }, 1.second) } } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala index 57bd9bab9e..f3f0026036 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala @@ -253,7 +253,7 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually } EventFilter.warning(pattern = ".*boom.*", occurrences = 1).intercept { - supervisor ! "boom" //this will be sent to deadLetters + supervisor ! 
"boom" // this will be sent to deadLetters expectNoMessage(500.milliseconds) } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala index 533979ddd6..ac0b67e822 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala @@ -24,9 +24,9 @@ class CircuitBreakerMTSpec extends AkkaSpec { // returns true if the breaker is open def failingCall(): Boolean = Await.result(breaker.withCircuitBreaker(Future.failed(new RuntimeException("FAIL"))).recover { - case _: CircuitBreakerOpenException => true - case _ => false - }, remainingOrDefault) + case _: CircuitBreakerOpenException => true + case _ => false + }, remainingOrDefault) // fire some failing calls (1 to (maxFailures + 1)).foreach { _ => diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala index 837a96988d..9a30642c30 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala @@ -349,7 +349,7 @@ class CircuitBreakerSpec extends AkkaSpec(""" breaker().currentFailureCount should ===(0) intercept[TestException] { val ct = Thread.currentThread() // Ensure that the thunk is executed in the tests thread - breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" }) + breaker().withSyncCircuitBreaker { if (Thread.currentThread() eq ct) throwException else "fail" } } breaker().currentFailureCount should ===(1) breaker().withSyncCircuitBreaker(sayHi) @@ -362,7 +362,7 @@ class CircuitBreakerSpec extends AkkaSpec(""" breaker().currentFailureCount should ===(0) intercept[TestException] { val ct = Thread.currentThread() // Ensure that the thunk is executed in the tests thread - breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" }) + breaker().withSyncCircuitBreaker { if (Thread.currentThread() eq ct) throwException else "fail" } } breaker().currentFailureCount should ===(1) @@ -385,7 +385,7 @@ class CircuitBreakerSpec extends AkkaSpec(""" breaker().currentFailureCount should ===(0) intercept[TestException] { val ct = Thread.currentThread() // Ensure that the thunk is executed in the tests thread - breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" }) + breaker().withSyncCircuitBreaker { if (Thread.currentThread() eq ct) throwException else "fail" } } breaker().currentFailureCount should ===(1) breaker().succeed() diff --git a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala index 32a23337da..d934b34ce8 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala @@ -29,10 +29,10 @@ class RetrySpec extends AkkaSpec with RetrySupport { @volatile var counter = 0 val retried = retry( () => - Future.successful({ + Future.successful { counter += 1 counter - }), + }, 5, 1 second) @@ -94,10 +94,11 @@ class RetrySpec extends AkkaSpec with RetrySupport { } else Future.successful(5) } - val retried = retry(() => attempt(), 5, attempted => { - attemptedCount = attempted - Some(100.milliseconds * attempted) - }) + val retried = retry(() => attempt(), 5, + attempted => { + attemptedCount 
= attempted + Some(100.milliseconds * attempted) + }) within(30000000 seconds) { intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("6") attemptedCount shouldBe 5 diff --git a/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala index 8f1c8b0331..0dead180c3 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala @@ -37,10 +37,10 @@ class ExplicitAskSpec extends AkkaSpec { implicit val timeout: Timeout = Timeout(5.seconds) val target = system.actorOf(Props(new Actor { - def receive = { - case Request(respondTo) => respondTo ! Response(self) - } - }), "select-echo") + def receive = { + case Request(respondTo) => respondTo ! Response(self) + } + }), "select-echo") val selection = system.actorSelection("/user/select-echo") val f = selection ? (respondTo => Request(respondTo)) diff --git a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala index 26d36c4cc2..be75223493 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala @@ -232,12 +232,12 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT val resizer = DefaultOptimalSizeExploringResizer() val router = TestRouter(routees(2)) val msgs1 = router.sendToAll(await = true) - val msgs2 = router.sendToAll(await = false) //make sure the routees are still busy after the first batch of messages get processed. + val msgs2 = router.sendToAll(await = false) // make sure the routees are still busy after the first batch of messages gets processed. val before = System.nanoTime() - resizer.reportMessageCount(router.routees, router.msgs.size) //updates the records + resizer.reportMessageCount(router.routees, router.msgs.size) // updates the records - msgs1.foreach(_.second.open()) //process two messages + msgs1.foreach(_.second.open()) // process two messages // make sure some time passes in-between Thread.sleep(300) @@ -263,12 +263,12 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT val router = TestRouter(routees(2)) val msgs1 = router.sendToAll(await = true) - val msgs2 = router.sendToAll(await = false) //make sure the routees are still busy after the first batch of messages get processed. + val msgs2 = router.sendToAll(await = false) // make sure the routees are still busy after the first batch of messages gets processed. 
val before = System.nanoTime() - resizer.reportMessageCount(router.routees, router.msgs.size) //updates the records + resizer.reportMessageCount(router.routees, router.msgs.size) // updates the records - msgs1.foreach(_.second.open()) //process two messages + msgs1.foreach(_.second.open()) // process two messages // make sure some time passes in-between Thread.sleep(300) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala index 5911ca4e41..e6f29ab459 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala @@ -23,14 +23,14 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val stopLatch = new TestLatch(7) val actor = system.actorOf(RandomPool(7).props(Props(new Actor { - def receive = { - case "hello" => sender() ! "world" - } + def receive = { + case "hello" => sender() ! "world" + } - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "random-shutdown") + override def postStop(): Unit = { + stopLatch.countDown() + } + })), "random-shutdown") actor ! "hello" actor ! "hello" @@ -58,12 +58,12 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } val actor = system.actorOf(RandomPool(connectionCount).props(routeeProps = Props(new Actor { - lazy val id = counter.getAndIncrement() - def receive = { - case "hit" => sender() ! id - case "end" => doneLatch.countDown() - } - })), name = "random") + lazy val id = counter.getAndIncrement() + def receive = { + case "hit" => sender() ! id + case "end" => doneLatch.countDown() + } + })), name = "random") for (_ <- 0 until iterationCount) { for (_ <- 0 until connectionCount) { @@ -77,7 +77,7 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! akka.routing.Broadcast("end") Await.ready(doneLatch, 5 seconds) - replies.values.foreach { _ should be > (0) } + replies.values.foreach { _ should be > 0 } replies.values.sum should ===(iterationCount * connectionCount) } @@ -86,14 +86,14 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val stopLatch = new TestLatch(6) val actor = system.actorOf(RandomPool(6).props(routeeProps = Props(new Actor { - def receive = { - case "hello" => helloLatch.countDown() - } + def receive = { + case "hello" => helloLatch.countDown() + } - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "random-broadcast") + override def postStop(): Unit = { + stopLatch.countDown() + } + })), "random-broadcast") actor ! akka.routing.Broadcast("hello") Await.ready(helloLatch, 5 seconds) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index c3b8ec6d79..50c7f90179 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -227,16 +227,16 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with } val z = routeeSize(router) - z should be > (2) + z should be > 2 Thread.sleep((300 millis).dilated.toMillis) // let it cool down awaitCond({ - router ! 0 // trigger resize - Thread.sleep((20 millis).dilated.toMillis) - routeeSize(router) < z - }, interval = 500.millis.dilated) + router ! 
0 // trigger resize + Thread.sleep((20 millis).dilated.toMillis) + routeeSize(router) < z + }, interval = 500.millis.dilated) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala index 76d246e047..3e75e2322e 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala @@ -30,14 +30,14 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val stopLatch = new TestLatch(5) val actor = system.actorOf(RoundRobinPool(5).props(routeeProps = Props(new Actor { - def receive = { - case "hello" => helloLatch.countDown() - } + def receive = { + case "hello" => helloLatch.countDown() + } - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "round-robin-shutdown") + override def postStop(): Unit = { + stopLatch.countDown() + } + })), "round-robin-shutdown") actor ! "hello" actor ! "hello" @@ -59,12 +59,12 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { var replies: Map[Int, Int] = Map.empty.withDefaultValue(0) val actor = system.actorOf(RoundRobinPool(connectionCount).props(routeeProps = Props(new Actor { - lazy val id = counter.getAndIncrement() - def receive = { - case "hit" => sender() ! id - case "end" => doneLatch.countDown() - } - })), "round-robin") + lazy val id = counter.getAndIncrement() + def receive = { + case "hit" => sender() ! id + case "end" => doneLatch.countDown() + } + })), "round-robin") for (_ <- 1 to iterationCount; _ <- 1 to connectionCount) { val id = Await.result((actor ? "hit").mapTo[Int], timeout.duration) @@ -84,14 +84,14 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val stopLatch = new TestLatch(5) val actor = system.actorOf(RoundRobinPool(5).props(routeeProps = Props(new Actor { - def receive = { - case "hello" => helloLatch.countDown() - } + def receive = { + case "hello" => helloLatch.countDown() + } - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "round-robin-broadcast") + override def postStop(): Unit = { + stopLatch.countDown() + } + })), "round-robin-broadcast") actor ! akka.routing.Broadcast("hello") Await.ready(helloLatch, 5 seconds) @@ -102,8 +102,8 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { "be controlled with management messages" in { val actor = system.actorOf(RoundRobinPool(3).props(routeeProps = Props(new Actor { - def receive = Actor.emptyBehavior - })), "round-robin-managed") + def receive = Actor.emptyBehavior + })), "round-robin-managed") routeeSize(actor) should ===(3) actor ! AdjustPoolSize(+4) @@ -130,11 +130,11 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val paths = (1 to connectionCount).map { n => val ref = system.actorOf(Props(new Actor { - def receive = { - case "hit" => sender() ! self.path.name - case "end" => doneLatch.countDown() - } - }), name = "target-" + n) + def receive = { + case "hit" => sender() ! 
self.path.name + case "end" => doneLatch.countDown() + } + }), name = "target-" + n) ref.path.toStringWithoutAddress } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 141fedde64..7677b9b85e 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -136,15 +136,15 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } "set supplied supervisorStrategy" in { - //#supervision + // #supervision val escalator = OneForOneStrategy() { - //#custom-strategy + // #custom-strategy case e => testActor ! e; SupervisorStrategy.Escalate - //#custom-strategy + // #custom-strategy } val router = system.actorOf(RoundRobinPool(1, supervisorStrategy = escalator).props(routeeProps = Props[TestActor]())) - //#supervision + // #supervision router ! GetRoutees EventFilter[ActorKilledException](occurrences = 1).intercept { expectMsgType[Routees].routees.head.send(Kill, testActor) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala index 072aa79148..ca072c495b 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala @@ -28,9 +28,9 @@ object ScatterGatherFirstCompletedSpec { system.actorOf( Props(new Actor { def receive = { - case Stop(None) => context.stop(self) - case Stop(Some(_id)) if (_id == id) => context.stop(self) - case _id: Int if (_id == id) => + case Stop(None) => context.stop(self) + case Stop(Some(_id)) if _id == id => context.stop(self) + case _id: Int if _id == id => case _ => { Thread.sleep(100 * id) sender() ! id diff --git a/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala index d19057f6f4..a2109b2cf9 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala @@ -51,15 +51,15 @@ class SmallestMailboxSpec extends AkkaSpec with DefaultTimeout with ImplicitSend busy.countDown() val busyPath = usedActors.get(0) - busyPath should not be (null) + busyPath should not be null val path1 = usedActors.get(1) val path2 = usedActors.get(2) val path3 = usedActors.get(3) - path1 should not be (busyPath) - path2 should not be (busyPath) - path3 should not be (busyPath) + path1 should not be busyPath + path2 should not be busyPath + path3 should not be busyPath } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala index e83d8631c0..ccd0dd7428 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala @@ -17,17 +17,17 @@ import akka.testkit._ object TailChoppingSpec { def newActor(id: Int, sleepTime: Duration)(implicit system: ActorSystem) = system.actorOf(Props(new Actor { - var times: Int = _ + var times: Int = _ - def receive = { - case "stop" => context.stop(self) - case "times" => sender() ! times - case _ => - times += 1 - Thread.sleep(sleepTime.toMillis) - sender() ! 
"ack" - } - }), "Actor:" + id) + def receive = { + case "stop" => context.stop(self) + case "times" => sender() ! times + case _ => + times += 1 + Thread.sleep(sleepTime.toMillis) + sender() ! "ack" + } + }), "Actor:" + id) } class TailChoppingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { diff --git a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala index 94891eb367..141689c693 100644 --- a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala @@ -183,7 +183,7 @@ class BoundedBlockingQueueSpec // queue.take() must happen first Thread.sleep(50) // this is why this test is tagged as TimingTest events should contain(awaitNotEmpty) - events should not contain (poll) + events should not contain poll } "block until the backing queue is non-empty" taggedAs TimingTest in { @@ -557,7 +557,7 @@ class BoundedBlockingQueueSpec val target = mutable.Buffer[String]() elems.foreach(queue.put) queue.drainTo(target.asJava) - elems should contain theSameElementsAs (target) + elems should contain theSameElementsAs target } } @@ -617,7 +617,7 @@ class BoundedBlockingQueueSpec queue.retainAll(elems.asJava) should equal(true) queue.remainingCapacity() should equal(1) queue.toArray() shouldNot contain("Akka") - queue.toArray() should contain theSameElementsAs (elems) + queue.toArray() should contain theSameElementsAs elems } "return false if no elements were removed" in { diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala index 7363df4cd1..29d9ccb1ca 100644 --- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala @@ -313,10 +313,10 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { for (i <- 0 until data.length) builder.putLongPart(data(i), nBytes)(byteOrder) reference.zipWithIndex - .collect({ // Since there is no partial put on LongBuffer, we need to collect only the interesting bytes + .collect { // Since there is no partial put on LongBuffer, we need to collect only the interesting bytes case (r, i) if byteOrder == ByteOrder.LITTLE_ENDIAN && i % elemSize < nBytes => r case (r, i) if byteOrder == ByteOrder.BIG_ENDIAN && i % elemSize >= (elemSize - nBytes) => r - }) + } .toSeq == builder.result() } @@ -886,13 +886,13 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { "calling span" in { check { (a: ByteString, b: Byte) => - likeVector(a)({ _.span(_ != b) match { case (a, b) => (a, b) } }) + likeVector(a) { _.span(_ != b) match { case (a, b) => (a, b) } } } } "calling takeWhile" in { check { (a: ByteString, b: Byte) => - likeVector(a)({ _.takeWhile(_ != b) }) + likeVector(a) { _.takeWhile(_ != b) } } } "calling dropWhile" in { @@ -940,9 +940,9 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVector(xs)({ + likeVector(xs) { _.slice(from, until) - }) + } } } } @@ -951,9 +951,9 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVector(xs)({ + likeVector(xs) { _.drop(from).take(until - from) - }) + } } } } @@ -970,11 +970,11 @@ class ByteStringSpec extends AnyWordSpec with Matchers 
with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVector(xs)({ it => + likeVector(xs) { it => val array = new Array[Byte](xs.length) it.copyToArray(array, from, until) array.toSeq - }) + } } } } @@ -1114,8 +1114,8 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { slice match { case (xs, from, until) => likeVecIt(xs)({ - _.slice(from, until).toSeq - }, strict = false) + _.slice(from, until).toSeq + }, strict = false) } } } @@ -1125,8 +1125,8 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { slice match { case (xs, from, until) => likeVecIt(xs)({ - _.drop(from).take(until - from).toSeq - }, strict = false) + _.drop(from).take(until - from).toSeq + }, strict = false) } } } @@ -1136,10 +1136,10 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { slice match { case (xs, from, until) => likeVecIt(xs)({ it => - val array = new Array[Byte](xs.length) - it.slice(from, until).copyToArray(array, from, until) - array.toSeq - }, strict = false) + val array = new Array[Byte](xs.length) + it.slice(from, until).copyToArray(array, from, until) + array.toSeq + }, strict = false) } } } diff --git a/akka-actor-tests/src/test/scala/akka/util/FrequencyListSpec.scala b/akka-actor-tests/src/test/scala/akka/util/FrequencyListSpec.scala index 75770a9e05..94d9d9e945 100644 --- a/akka-actor-tests/src/test/scala/akka/util/FrequencyListSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/FrequencyListSpec.scala @@ -44,52 +44,52 @@ class FrequencyListSpec extends AnyWordSpec with Matchers { check(frequency, Nil) frequency.update("a") - check(frequency, List( /* 1: */ "a")) + check(frequency, List(/* 1: */ "a")) frequency.update("b").update("c") - check(frequency, List( /* 1: */ "a", "b", "c")) + check(frequency, List(/* 1: */ "a", "b", "c")) frequency.update("a").update("c") - check(frequency, List( /* 1: */ "b", /* 2: */ "a", "c")) + check(frequency, List(/* 1: */ "b", /* 2: */ "a", "c")) frequency.update("d").update("e").update("f").update("g") - check(frequency, List( /* 1: */ "b", "d", "e", "f", "g", /* 2: */ "a", "c")) + check(frequency, List(/* 1: */ "b", "d", "e", "f", "g", /* 2: */ "a", "c")) frequency.update("c").update("f") - check(frequency, List( /* 1: */ "b", "d", "e", "g", /* 2: */ "a", "f", /* 3: */ "c")) + check(frequency, List(/* 1: */ "b", "d", "e", "g", /* 2: */ "a", "f", /* 3: */ "c")) frequency.remove("d").remove("g").remove("b").remove("f") - check(frequency, List( /* 1: */ "e", /* 2: */ "a", /* 3: */ "c")) + check(frequency, List(/* 1: */ "e", /* 2: */ "a", /* 3: */ "c")) frequency.update("e").update("h").update("i") - check(frequency, List( /* 1: */ "h", "i", /* 2: */ "a", "e", /* 3: */ "c")) + check(frequency, List(/* 1: */ "h", "i", /* 2: */ "a", "e", /* 3: */ "c")) frequency.removeLeastFrequent(3) shouldBe List("h", "i", "a") - check(frequency, List( /* 2: */ "e", /* 3: */ "c")) + check(frequency, List(/* 2: */ "e", /* 3: */ "c")) frequency.update("j").update("k").update("l").update("m") - check(frequency, List( /* 1: */ "j", "k", "l", "m", /* 2: */ "e", /* 3: */ "c")) + check(frequency, List(/* 1: */ "j", "k", "l", "m", /* 2: */ "e", /* 3: */ "c")) frequency.removeLeastFrequent(skip = OptionVal.Some("j")) shouldBe List("k") - check(frequency, List( /* 1: */ "j", "l", "m", /* 2: */ "e", /* 3: */ "c")) + check(frequency, List(/* 1: */ "j", "l", "m", /* 2: */ "e", /* 3: */ "c")) frequency.removeLeastFrequent(2, skip = OptionVal.Some("l")) shouldBe List("j", "m") - 
check(frequency, List( /* 1: */ "l", /* 2: */ "e", /* 3: */ "c")) + check(frequency, List(/* 1: */ "l", /* 2: */ "e", /* 3: */ "c")) frequency.update("n").update("o").update("p").update("e").update("o").update("l") - check(frequency, List( /* 1: */ "n", "p", /* 2: */ "o", "l", /* 3: */ "c", "e")) + check(frequency, List(/* 1: */ "n", "p", /* 2: */ "o", "l", /* 3: */ "c", "e")) frequency.removeMostFrequent(3) shouldBe List("e", "c", "l") - check(frequency, List( /* 1: */ "n", "p", /* 2: */ "o")) + check(frequency, List(/* 1: */ "n", "p", /* 2: */ "o")) frequency.update("q").update("r").update("p").update("o").update("n") - check(frequency, List( /* 1: */ "q", "r", /* 2: */ "p", "n", /* 3: */ "o")) + check(frequency, List(/* 1: */ "q", "r", /* 2: */ "p", "n", /* 3: */ "o")) frequency.removeMostFrequent(skip = OptionVal.Some("o")) shouldBe List("n") - check(frequency, List( /* 1: */ "q", "r", /* 2: */ "p", /* 3: */ "o")) + check(frequency, List(/* 1: */ "q", "r", /* 2: */ "p", /* 3: */ "o")) frequency.removeMostFrequent(2, skip = OptionVal.Some("p")) shouldBe List("o", "r") - check(frequency, List( /* 1: */ "q", /* 2: */ "p")) + check(frequency, List(/* 1: */ "q", /* 2: */ "p")) } "track overall recency of elements when enabled" in { @@ -100,72 +100,72 @@ class FrequencyListSpec extends AnyWordSpec with Matchers { clock.tick() // time = 1 frequency.update("a") - check(frequency, List( /* 1: */ "a")) + check(frequency, List(/* 1: */ "a")) checkRecency(frequency, List("a")) clock.tick() // time = 2 frequency.update("b").update("c") - check(frequency, List( /* 1: */ "a", "b", "c")) + check(frequency, List(/* 1: */ "a", "b", "c")) checkRecency(frequency, List("a", "b", "c")) clock.tick() // time = 3 frequency.update("a").update("c") - check(frequency, List( /* 1: */ "b", /* 2: */ "a", "c")) + check(frequency, List(/* 1: */ "b", /* 2: */ "a", "c")) checkRecency(frequency, List("b", "a", "c")) clock.tick() // time = 4 frequency.update("d").update("e").update("f") - check(frequency, List( /* 1: */ "b", "d", "e", "f", /* 2: */ "a", "c")) + check(frequency, List(/* 1: */ "b", "d", "e", "f", /* 2: */ "a", "c")) checkRecency(frequency, List("b", "a", "c", "d", "e", "f")) clock.tick() // time = 5 frequency.update("c").update("f") - check(frequency, List( /* 1: */ "b", "d", "e", /* 2: */ "a", "f", /* 3: */ "c")) + check(frequency, List(/* 1: */ "b", "d", "e", /* 2: */ "a", "f", /* 3: */ "c")) checkRecency(frequency, List("b", "a", "d", "e", "c", "f")) clock.tick() // time = 6 frequency.remove("d").remove("b").remove("f") - check(frequency, List( /* 1: */ "e", /* 2: */ "a", /* 3: */ "c")) + check(frequency, List(/* 1: */ "e", /* 2: */ "a", /* 3: */ "c")) checkRecency(frequency, List("a", "e", "c")) clock.tick() // time = 7 frequency.update("e").update("h").update("i") - check(frequency, List( /* 1: */ "h", "i", /* 2: */ "a", "e", /* 3: */ "c")) + check(frequency, List(/* 1: */ "h", "i", /* 2: */ "a", "e", /* 3: */ "c")) checkRecency(frequency, List("a", "c", "e", "h", "i")) clock.tick() // time = 8 frequency.removeOverallLeastRecent() shouldBe List("a") - check(frequency, List( /* 1: */ "h", "i", /* 2: */ "e", /* 3: */ "c")) + check(frequency, List(/* 1: */ "h", "i", /* 2: */ "e", /* 3: */ "c")) checkRecency(frequency, List("c", "e", "h", "i")) clock.tick() // time = 9 frequency.update("i").update("j").update("k") - check(frequency, List( /* 1: */ "h", "j", "k", /* 2: */ "e", "i", /* 3: */ "c")) + check(frequency, List(/* 1: */ "h", "j", "k", /* 2: */ "e", "i", /* 3: */ "c")) checkRecency(frequency, 
List("c", "e", "h", "i", "j", "k")) clock.tick() // time = 10 frequency.removeOverallMostRecent() shouldBe List("k") - check(frequency, List( /* 1: */ "h", "j", /* 2: */ "e", "i", /* 3: */ "c")) + check(frequency, List(/* 1: */ "h", "j", /* 2: */ "e", "i", /* 3: */ "c")) checkRecency(frequency, List("c", "e", "h", "i", "j")) clock.tick() // time = 11 frequency.removeOverallLeastRecentOutside(3.seconds) shouldBe List("c", "e", "h") - check(frequency, List( /* 1: */ "j", /* 2: */ "i")) + check(frequency, List(/* 1: */ "j", /* 2: */ "i")) checkRecency(frequency, List("i", "j")) clock.tick() // time = 12 frequency.update("l").update("m") - check(frequency, List( /* 1: */ "j", "l", "m", /* 2: */ "i")) + check(frequency, List(/* 1: */ "j", "l", "m", /* 2: */ "i")) checkRecency(frequency, List("i", "j", "l", "m")) clock.tick() // time = 13 frequency.removeOverallMostRecentWithin(3.seconds) shouldBe List("m", "l") - check(frequency, List( /* 1: */ "j", /* 2: */ "i")) + check(frequency, List(/* 1: */ "j", /* 2: */ "i")) checkRecency(frequency, List("i", "j")) clock.tick() // time = 14 frequency.update("n").update("o").update("n") - check(frequency, List( /* 1: */ "j", "o", /* 2: */ "i", "n")) + check(frequency, List(/* 1: */ "j", "o", /* 2: */ "i", "n")) checkRecency(frequency, List("i", "j", "o", "n")) } @@ -177,80 +177,80 @@ class FrequencyListSpec extends AnyWordSpec with Matchers { check(aging, Nil) for (_ <- 1 to 10) regular.update("a").update("b").update("c") - check(regular, List( /*10*/ "a", "b", "c")) + check(regular, List(/*10*/ "a", "b", "c")) for (_ <- 1 to 10) aging.update("a").update("b").update("c") - check(aging, List( /*10+0*/ "a", "b", "c")) + check(aging, List(/*10+0*/ "a", "b", "c")) // age = 0 regular.update("x").update("y").update("z") - check(regular, List( /*1*/ "x", "y", "z", /*10*/ "a", "b", "c")) + check(regular, List(/*1*/ "x", "y", "z", /*10*/ "a", "b", "c")) aging.update("x").update("y").update("z") - check(aging, List( /*1+0*/ "x", "y", "z", /*10+0*/ "a", "b", "c")) + check(aging, List(/*1+0*/ "x", "y", "z", /*10+0*/ "a", "b", "c")) regular.removeLeastFrequent() shouldBe List("x") - check(regular, List( /*1*/ "y", "z", /*10*/ "a", "b", "c")) + check(regular, List(/*1*/ "y", "z", /*10*/ "a", "b", "c")) aging.removeLeastFrequent() shouldBe List("x") - check(aging, List( /*1+0*/ "y", "z", /*10+0*/ "a", "b", "c")) + check(aging, List(/*1+0*/ "y", "z", /*10+0*/ "a", "b", "c")) // age = 1 (from last removal of "x") regular.update("x").update("y").update("z").update("z") - check(regular, List( /*1*/ "x", /*2*/ "y", /*3*/ "z", /*10*/ "a", "b", "c")) + check(regular, List(/*1*/ "x", /*2*/ "y", /*3*/ "z", /*10*/ "a", "b", "c")) aging.update("x").update("y").update("z").update("z") - check(aging, List( /*1+1*/ "x", /*2+1*/ "y", /*3+1*/ "z", /*10+0*/ "a", "b", "c")) + check(aging, List(/*1+1*/ "x", /*2+1*/ "y", /*3+1*/ "z", /*10+0*/ "a", "b", "c")) regular.removeLeastFrequent(2) shouldBe List("x", "y") - check(regular, List( /*3*/ "z", /*10*/ "a", "b", "c")) + check(regular, List(/*3*/ "z", /*10*/ "a", "b", "c")) aging.removeLeastFrequent(2) shouldBe List("x", "y") - check(aging, List( /*3+1*/ "z", /*10+0*/ "a", "b", "c")) + check(aging, List(/*3+1*/ "z", /*10+0*/ "a", "b", "c")) // age = 3 (from last removal of "y") regular.update("x").update("y").update("z") - check(regular, List( /*1*/ "x", "y", /*4*/ "z", /*10*/ "a", "b", "c")) + check(regular, List(/*1*/ "x", "y", /*4*/ "z", /*10*/ "a", "b", "c")) aging.update("x").update("y").update("z") - check(aging, List( /*1+3*/ "x", 
"y", /*4+3*/ "z", /*10+0*/ "a", "b", "c")) + check(aging, List(/*1+3*/ "x", "y", /*4+3*/ "z", /*10+0*/ "a", "b", "c")) regular.removeLeastFrequent(3) shouldBe List("x", "y", "z") - check(regular, List( /*10*/ "a", "b", "c")) + check(regular, List(/*10*/ "a", "b", "c")) aging.removeLeastFrequent(3) shouldBe List("x", "y", "z") - check(aging, List( /*10+0*/ "a", "b", "c")) + check(aging, List(/*10+0*/ "a", "b", "c")) // age = 7 (from last removal of "z") regular.update("x").update("y").update("y").update("z").update("z").update("z") - check(regular, List( /*1*/ "x", /*2*/ "y", /*3*/ "z", /*10*/ "a", "b", "c")) + check(regular, List(/*1*/ "x", /*2*/ "y", /*3*/ "z", /*10*/ "a", "b", "c")) aging.update("x").update("y").update("y").update("z").update("z").update("z") - check(aging, List( /*1+7*/ "x", /*2+7*/ "y", /*10+0*/ "a", "b", "c", /*3+7*/ "z")) + check(aging, List(/*1+7*/ "x", /*2+7*/ "y", /*10+0*/ "a", "b", "c", /*3+7*/ "z")) regular.removeLeastFrequent(2) shouldBe List("x", "y") - check(regular, List( /*3*/ "z", /*10*/ "a", "b", "c")) + check(regular, List(/*3*/ "z", /*10*/ "a", "b", "c")) aging.removeLeastFrequent(2) shouldBe List("x", "y") - check(aging, List( /*10+0*/ "a", "b", "c", /*3+7*/ "z")) + check(aging, List(/*10+0*/ "a", "b", "c", /*3+7*/ "z")) // age = 9 (from last removal of "y") regular.update("x").update("y").update("z") - check(regular, List( /*1*/ "x", "y", /*4*/ "z", /*10*/ "a", "b", "c")) + check(regular, List(/*1*/ "x", "y", /*4*/ "z", /*10*/ "a", "b", "c")) aging.update("x").update("y").update("z") - check(aging, List( /*10+0*/ "a", "b", "c", /*1+9*/ "x", "y", /*4+9*/ "z")) + check(aging, List(/*10+0*/ "a", "b", "c", /*1+9*/ "x", "y", /*4+9*/ "z")) regular.removeLeastFrequent(3) shouldBe List("x", "y", "z") - check(regular, List( /*10*/ "a", "b", "c")) + check(regular, List(/*10*/ "a", "b", "c")) aging.removeLeastFrequent(3) shouldBe List("a", "b", "c") - check(aging, List( /*1+9*/ "x", "y", /*4+9*/ "z")) + check(aging, List(/*1+9*/ "x", "y", /*4+9*/ "z")) // age = 10 (from last removal of "c") } diff --git a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala index 1aa570031a..835d4dd5be 100644 --- a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala @@ -19,9 +19,10 @@ import akka.testkit.DefaultTimeout class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout { implicit val ec: ExecutionContextExecutor = system.dispatcher private def emptyIndex = - new Index[String, Int](100, new Comparator[Int] { - override def compare(a: Int, b: Int): Int = Integer.compare(a, b) - }) + new Index[String, Int](100, + new Comparator[Int] { + override def compare(a: Int, b: Int): Int = Integer.compare(a, b) + }) private def indexWithValues = { val index = emptyIndex @@ -58,11 +59,11 @@ class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout { index.put("s1", 2) index.put("s2", 1) index.put("s2", 2) - //Remove value + // Remove value index.remove("s1", 1) should ===(true) index.remove("s1", 1) should ===(false) index.valueIterator("s1").toSet should ===(Set(2)) - //Remove key + // Remove key index.remove("s2") match { case Some(iter) => iter.toSet should ===(Set(1, 2)) case None => fail() @@ -101,16 +102,17 @@ class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout { index.isEmpty should ===(true) } "be able to be accessed in parallel" in { - val index = new Index[Int, Int](100, new Comparator[Int] { - override def 
compare(a: Int, b: Int): Int = Integer.compare(a, b) - }) + val index = new Index[Int, Int](100, + new Comparator[Int] { + override def compare(a: Int, b: Int): Int = Integer.compare(a, b) + }) val nrOfTasks = 10000 val nrOfKeys = 10 val nrOfValues = 10 - //Fill index + // Fill index for (key <- 0 until nrOfKeys; value <- 0 until nrOfValues) index.put(key, value) - //Tasks to be executed in parallel + // Tasks to be executed in parallel def putTask() = Future { index.put(Random.nextInt(nrOfKeys), Random.nextInt(nrOfValues)) } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala index a652757d04..6e232ed956 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala @@ -488,9 +488,9 @@ class DeferredScalaBehaviorSpec extends ImmutableWithSignalScalaBehaviorSpec { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Done]("deferredListener") (SBehaviors.setup(_ => { - inbox.ref ! Done - super.behavior(monitor)._1 - }), inbox) + inbox.ref ! Done + super.behavior(monitor)._1 + }), inbox) } override def checkAux(signal: Signal, aux: Aux): Unit = @@ -594,10 +594,12 @@ class ImmutableJavaBehaviorSpec extends Messages with Become with Stoppable { class TransformMessagesJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec with Reuse with Siphon { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Command]("transformMessagesListener") - JBehaviors.transformMessages(classOf[Command], super.behavior(monitor)._1, pf(_.`match`(classOf[Command], fi(x => { - inbox.ref ! x - x - })))) -> inbox + JBehaviors.transformMessages(classOf[Command], super.behavior(monitor)._1, + pf(_.`match`(classOf[Command], + fi(x => { + inbox.ref ! x + x + })))) -> inbox } } @@ -607,9 +609,9 @@ class DeferredJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Done]("deferredListener") (JBehaviors.setup(_ => { - inbox.ref ! Done - super.behavior(monitor)._1 - }), inbox) + inbox.ref ! Done + super.behavior(monitor)._1 + }), inbox) } override def checkAux(signal: Signal, aux: Aux): Unit = diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala index e6bca05641..2bc367f321 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala @@ -120,10 +120,11 @@ class DeferredSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with L // monitor is implemented with tap, so this is testing both val probe = TestProbe[Event]("evt") val monitorProbe = TestProbe[Command]("monitor") - val behv = Behaviors.monitor(monitorProbe.ref, Behaviors.setup[Command] { _ => - probe.ref ! Started - target(probe.ref) - }) + val behv = Behaviors.monitor(monitorProbe.ref, + Behaviors.setup[Command] { _ => + probe.ref ! 
Started + target(probe.ref) + }) probe.expectNoMessage() // not yet val ref = spawn(behv) // it's supposed to be created immediately (not waiting for first message) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala index 25397497e2..cb3472a8bd 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala @@ -483,10 +483,11 @@ class InterceptSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "be possible to combine with MDC" in { val probe = createTestProbe[String]() val ref = spawn(Behaviors.setup[Command] { _ => - Behaviors.withMdc(staticMdc = Map("x" -> "y"), mdcForMessage = (msg: Command) => { - probe.ref ! s"mdc:${msg.s.toUpperCase()}" - Map("msg" -> msg.s.toUpperCase()) - }) { + Behaviors.withMdc(staticMdc = Map("x" -> "y"), + mdcForMessage = (msg: Command) => { + probe.ref ! s"mdc:${msg.s.toUpperCase()}" + Map("msg" -> msg.s.toUpperCase()) + }) { MultiProtocol(probe.ref) } }) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala index 2261e9028a..6907f8590f 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala @@ -66,14 +66,14 @@ class MailboxSelectorSpec extends ScalaTestWithActorTestKit(""" val latch = new CountDownLatch(1) val probe = testKit.createTestProbe[String]() val actor = spawn(Behaviors.receiveMessage[String] { - case "one" => - // block here so we can fill mailbox up - probe ! "blocking-on-one" - latch.await(10, TimeUnit.SECONDS) - Behaviors.same - case _ => - Behaviors.same - }, MailboxSelector.bounded(2)) + case "one" => + // block here so we can fill mailbox up + probe ! "blocking-on-one" + latch.await(10, TimeUnit.SECONDS) + Behaviors.same + case _ => + Behaviors.same + }, MailboxSelector.bounded(2)) actor ! "one" // actor will block here probe.expectMessage("blocking-on-one") actor ! "two" diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala index 80c0329871..6d66dbd386 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala @@ -160,14 +160,14 @@ class WatchSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC spawn( Behaviors.setup[Any] { context => val middleManagement = context.spawn(Behaviors.setup[Any] { context => - val sixPackJoe = context.spawn(Behaviors.receive[Any]((_, _) => throw ex), "joe") - context.watch(sixPackJoe) + val sixPackJoe = context.spawn(Behaviors.receive[Any]((_, _) => throw ex), "joe") + context.watch(sixPackJoe) - Behaviors.receive[Any] { (_, message) => - sixPackJoe ! message - Behaviors.same - } // no handling of terminated, even though we watched!!! - }, "middle-management") + Behaviors.receive[Any] { (_, message) => + sixPackJoe ! message + Behaviors.same + } // no handling of terminated, even though we watched!!! 
+ }, "middle-management") context.watch(middleManagement) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala index 7838f58280..17ca1733d3 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala @@ -103,7 +103,8 @@ class TestConsumer( case job @ SomeAsyncJob(_, confirmTo, producerId, seqNr) => // when replacing producer the seqNr may start from 1 again val cleanProcessed = - if (seqNr == 1L) processed.filterNot { case (pid, _) => pid == producerId } else processed + if (seqNr == 1L) processed.filterNot { case (pid, _) => pid == producerId } + else processed if (cleanProcessed((producerId, seqNr))) throw new RuntimeException(s"Received duplicate [($producerId,$seqNr)]") diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala index e796ce64c8..463a601fb1 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala @@ -62,10 +62,11 @@ class ActorSystemSpec "An ActorSystem" must { "start the guardian actor and terminate when it terminates" in { - withSystem("a", Behaviors.receiveMessage[Probe] { p => - p.replyTo ! p.message - Behaviors.stopped - }, doTerminate = false) { sys => + withSystem("a", + Behaviors.receiveMessage[Probe] { p => + p.replyTo ! p.message + Behaviors.stopped + }, doTerminate = false) { sys => val inbox = TestInbox[String]("a") sys ! Probe("hello", inbox.ref) eventually { @@ -92,13 +93,13 @@ class ActorSystemSpec "terminate the guardian actor" in { val inbox = TestInbox[String]("terminate") val sys = system(Behaviors.setup[Any] { _ => - inbox.ref ! "started" - Behaviors.receiveSignal { - case (_, PostStop) => - inbox.ref ! "done" - Behaviors.same - } - }, "terminate") + inbox.ref ! "started" + Behaviors.receiveSignal { + case (_, PostStop) => + inbox.ref ! 
"done" + Behaviors.same + } + }, "terminate") eventually { inbox.hasMessages should ===(true) @@ -114,8 +115,8 @@ class ActorSystemSpec "be able to terminate immediately" in { val sys = system(Behaviors.receiveMessage[Probe] { _ => - Behaviors.unhandled - }, "terminate") + Behaviors.unhandled + }, "terminate") // for this case the guardian might not have been started before // the system terminates and then it will not receive PostStop, which // is OK since it wasn't really started yet diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala index 0311b501eb..f51a4f8fee 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala @@ -72,10 +72,11 @@ class AdaptationFailureSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi val probe = createTestProbe[Any]() val threw = Promise[Done]() val ref = spawn(Behaviors.setup[Any] { ctx => - val adapter = ctx.messageAdapter[Any](classOf[Any], { _ => - threw.success(Done) - throw TestException("boom") - }) + val adapter = ctx.messageAdapter[Any](classOf[Any], + { _ => + threw.success(Done) + throw TestException("boom") + }) adapter ! "go boom" behavior }) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala index d95c12c802..1b6d3aef45 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala @@ -44,9 +44,9 @@ class ActorContextAskSpec case class Pong(selfName: String, threadName: String) val pingPong = spawn(Behaviors.receive[Ping] { (context, message) => - message.sender ! Pong(context.self.path.name, Thread.currentThread().getName) - Behaviors.same - }, "ping-pong", Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) + message.sender ! 
Pong(context.self.path.name, Thread.currentThread().getName) + Behaviors.same + }, "ping-pong", Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) val probe = TestProbe[Pong]() diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala index 2f71d4fd0b..1914fd72bf 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala @@ -119,43 +119,43 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" } "contain the class name where the first log was called" in { - val eventFilter = LoggingTestKit.custom({ + val eventFilter = LoggingTestKit.custom { case event if event.loggerName == classOf[ActorLoggingSpec].getName => true case event => println(event.loggerName) false - }) + } eventFilter.expect(spawn(Behaviors.setup[String] { context => - context.log.info("Started") + context.log.info("Started") - Behaviors.receive { (context, message) => - context.log.info("got message {}", message) - Behaviors.same - } - }, "the-actor-with-class")) + Behaviors.receive { (context, message) => + context.log.info("got message {}", message) + Behaviors.same + } + }, "the-actor-with-class")) } "contain the object class name where the first log was called" in { - val eventFilter = LoggingTestKit.custom({ + val eventFilter = LoggingTestKit.custom { case event if event.loggerName == WhereTheBehaviorIsDefined.getClass.getName => true case other => println(other.loggerName) false - }) + } eventFilter.expect(spawn(WhereTheBehaviorIsDefined.behavior, "the-actor-with-object")) } "contain the abstract behavior class name where the first log was called" in { - val eventFilter = LoggingTestKit.custom({ + val eventFilter = LoggingTestKit.custom { case event if event.loggerName == classOf[BehaviorWhereTheLoggerIsUsed].getName => true case other => println(other.loggerName) false - }) + } eventFilter.expect { spawn(Behaviors.setup[String](context => new BehaviorWhereTheLoggerIsUsed(context)), "the-actor-with-behavior") @@ -200,7 +200,7 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" true // any is fine, we're just after the right count of statements reaching the listener } .withOccurrences(36) - .expect({ + .expect { spawn(Behaviors.setup[String] { context => context.log.debug("message") @@ -247,7 +247,7 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" Behaviors.stopped }) - }) + } } "use Slf4jLogger from akka-slf4j automatically" in { @@ -490,8 +490,7 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" Map( ActorMdc.AkkaAddressKey -> system.classicSystem.asInstanceOf[ExtendedActorSystem].provider.addressString, ActorMdc.AkkaSourceKey -> actorPath.get.toString, - ActorMdc.SourceActorSystemKey -> system.name) - ) + ActorMdc.SourceActorSystemKey -> system.name)) true } catch { case ex: Throwable => diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala index 2f3baa0e5b..c9575d3fc7 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala @@ -23,16 +23,16 @@ final class GracefulStopSpec extends ScalaTestWithActorTestKit with AnyWordSpecL val 
behavior = Behaviors.setup[akka.NotUsed] { context => context.spawn[NotUsed](Behaviors.receiveSignal { - case (_, PostStop) => - probe.ref ! "child-done" - Behaviors.stopped - }, "child1") + case (_, PostStop) => + probe.ref ! "child-done" + Behaviors.stopped + }, "child1") context.spawn[NotUsed](Behaviors.receiveSignal { - case (_, PostStop) => - probe.ref ! "child-done" - Behaviors.stopped - }, "child2") + case (_, PostStop) => + probe.ref ! "child-done" + Behaviors.stopped + }, "child2") Behaviors.stopped { () => // cleanup function body diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala index a28f2b90be..fed724f49e 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala @@ -70,9 +70,9 @@ class MessageAdapterSpec case class AnotherPong(selfName: String, threadName: String) val pingPong = spawn(Behaviors.receive[Ping] { (context, message) => - message.sender ! Pong(context.self.path.name, Thread.currentThread().getName) - Behaviors.same - }, "ping-pong", Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) + message.sender ! Pong(context.self.path.name, Thread.currentThread().getName) + Behaviors.same + }, "ping-pong", Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) val probe = TestProbe[AnotherPong]() diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala index bfd7cfd544..2e9e78a53e 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala @@ -35,8 +35,8 @@ final class OnSignalSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike def stopper(probe: TestProbe[Done], children: Int) = Behaviors.setup[String] { ctx => (0 until children).foreach { i => ctx.spawn(Behaviors.receiveMessage[String] { _ => - Behaviors.same - }, s"$i") + Behaviors.same + }, s"$i") } Behaviors .receiveMessagePartial[String] { diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala index 28a0884ed5..26fe75b9b6 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala @@ -213,12 +213,12 @@ class AdapterSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with var systemN: akka.actor.typed.ActorSystem[Done] = null try { systemN = ActorSystem.create(Behaviors.receive[Done] { (context, message) => - context.self ! Done - message match { - case Done => Behaviors.stopped - } + context.self ! 
Done + message match { + case Done => Behaviors.stopped + } - }, "AdapterSpec-stopping-guardian-2") + }, "AdapterSpec-stopping-guardian-2") } finally if (system != null) TestKit.shutdownActorSystem(systemN.toClassic) } @@ -231,7 +231,7 @@ class AdapterSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with } "allow seamless access to untyped extensions" in { - SerializationExtension(typedSystem) should not be (null) + SerializationExtension(typedSystem) should not be null } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala index 0784dfe310..444e4de0f1 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala @@ -14,7 +14,7 @@ import org.scalatest.wordspec.AnyWordSpecLike object AggregatorSpec { object IllustrateUsage { - //#usage + // #usage object Hotel1 { final case class RequestQuote(replyTo: ActorRef[Quote]) final case class Quote(hotel: String, price: BigDecimal) @@ -65,7 +65,7 @@ object AggregatorSpec { } } } - //#usage + // #usage } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala index 645ed7afc7..5f34f92542 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala @@ -42,7 +42,7 @@ object DispatchersDocSpec { val yourBehavior: Behavior[String] = Behaviors.same val example = Behaviors.receive[Any] { (context, _) => - //#spawn-dispatcher + // #spawn-dispatcher import akka.actor.typed.DispatcherSelector context.spawn(yourBehavior, "DefaultDispatcher") @@ -50,7 +50,7 @@ object DispatchersDocSpec { context.spawn(yourBehavior, "BlockingDispatcher", DispatcherSelector.blocking()) context.spawn(yourBehavior, "ParentDispatcher", DispatcherSelector.sameAsParent()) context.spawn(yourBehavior, "DispatcherFromConfig", DispatcherSelector.fromConfig("your-dispatcher")) - //#spawn-dispatcher + // #spawn-dispatcher Behaviors.same } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala index 56db8238b7..b094f36bd2 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala @@ -16,10 +16,10 @@ import org.scalatest.wordspec.AnyWordSpecLike object FSMDocSpec { - //#simple-state - //#simple-events + // #simple-state + // #simple-events object Buncher { - //#simple-state + // #simple-state // FSM event becomes the type of the message Actor supports sealed trait Event @@ -27,17 +27,17 @@ object FSMDocSpec { final case class Queue(obj: Any) extends Event case object Flush extends Event private case object Timeout extends Event - //#simple-events + // #simple-events - //#storing-state + // #storing-state sealed trait Data case object Uninitialized extends Data final case class Todo(target: ActorRef[Batch], queue: immutable.Seq[Any]) extends Data final case class Batch(obj: immutable.Seq[Any]) - //#storing-state + // #storing-state - //#simple-state + // #simple-state // states of the FSM represented as behaviors // initial state @@ -67,10 +67,10 @@ object FSMDocSpec { } } - //#simple-events + // #simple-events } - //#simple-events 
- //#simple-state + // #simple-events + // #simple-state } class FSMDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing { diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala index e96f7fe0a5..4c023cec2e 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala @@ -22,7 +22,7 @@ import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit object GracefulStopDocSpec { - //#master-actor + // #master-actor object MasterControlProgram { sealed trait Command @@ -51,9 +51,9 @@ object GracefulStopDocSpec { } } } - //#master-actor + // #master-actor - //#worker-actor + // #worker-actor object Job { sealed trait Command @@ -66,10 +66,10 @@ object GracefulStopDocSpec { } } } - //#worker-actor + // #worker-actor object IllustrateWatch { - //#master-actor-watch + // #master-actor-watch object MasterControlProgram { sealed trait Command @@ -93,11 +93,11 @@ object GracefulStopDocSpec { } } } - //#master-actor-watch + // #master-actor-watch } object IllustrateWatchWith { - //#master-actor-watchWith + // #master-actor-watchWith object MasterControlProgram { sealed trait Command @@ -121,7 +121,7 @@ object GracefulStopDocSpec { } } } - //#master-actor-watchWith + // #master-actor-watchWith } } @@ -133,7 +133,7 @@ class GracefulStopDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike "Graceful stop example" must { "start some workers" in { - //#start-workers + // #start-workers import MasterControlProgram._ val system: ActorSystem[Command] = ActorSystem(MasterControlProgram(), "B6700") @@ -148,7 +148,7 @@ class GracefulStopDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike system.terminate() Await.result(system.whenTerminated, 3.seconds) - //#start-workers + // #start-workers } "gracefully stop workers and master" in { diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala index 76dff0df9f..5a36eee90b 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala @@ -170,7 +170,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec "contain a sample for scheduling messages to self" in { - //#timer + // #timer object Buncher { sealed trait Command @@ -214,7 +214,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec } } } - //#timer + // #timer val probe = createTestProbe[Buncher.Batch]() val buncher: ActorRef[Buncher.Command] = spawn(Buncher(probe.ref, 1.second, 10)) @@ -581,7 +581,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec } "contain a sample for pipeToSelf" in { - //#pipeToSelf + // #pipeToSelf trait CustomerDataAccess { def update(value: Customer): Future[Done] @@ -632,7 +632,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec } } } - //#pipeToSelf + // #pipeToSelf val dataAccess = new CustomerDataAccess { override def update(value: Customer): Future[Done] = Future.successful(Done) diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala 
b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala index 76ce758bd1..257f69d342 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala @@ -109,7 +109,7 @@ object IntroSpec { final case class SayHello(name: String) - //#hello-world-main-with-dispatchers + // #hello-world-main-with-dispatchers def apply(): Behavior[SayHello] = Behaviors.setup { context => val dispatcherPath = "akka.actor.default-blocking-io-dispatcher" @@ -124,21 +124,21 @@ object IntroSpec { Behaviors.same } } - //#hello-world-main-with-dispatchers + // #hello-world-main-with-dispatchers } } - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior object ChatRoom { - //#chatroom-behavior + // #chatroom-behavior sealed trait RoomCommand final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) extends RoomCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior private final case class PublishSessionMessage(screenName: String, message: String) extends RoomCommand - //#chatroom-behavior - //#chatroom-protocol + // #chatroom-behavior + // #chatroom-protocol sealed trait SessionEvent final case class SessionGranted(handle: ActorRef[PostMessage]) extends SessionEvent @@ -148,8 +148,8 @@ object IntroSpec { sealed trait SessionCommand final case class PostMessage(message: String) extends SessionCommand private final case class NotifyClient(message: MessagePosted) extends SessionCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior def apply(): Behavior[RoomCommand] = chatRoom(List.empty) @@ -185,24 +185,24 @@ object IntroSpec { client ! message Behaviors.same } - //#chatroom-protocol + // #chatroom-protocol } - //#chatroom-behavior - //#chatroom-protocol + // #chatroom-behavior + // #chatroom-protocol - //#chatroom-gabbler + // #chatroom-gabbler object Gabbler { import ChatRoom._ def apply(): Behavior[SessionEvent] = Behaviors.setup { context => Behaviors.receiveMessage { - //#chatroom-gabbler + // #chatroom-gabbler // We document that the compiler warns about the missing handler for `SessionDenied` case SessionDenied(reason) => context.log.info("cannot start chat room session: {}", reason) Behaviors.stopped - //#chatroom-gabbler + // #chatroom-gabbler case SessionGranted(handle) => handle ! PostMessage("Hello World!") Behaviors.same @@ -212,9 +212,9 @@ object IntroSpec { } } } - //#chatroom-gabbler + // #chatroom-gabbler - //#chatroom-main + // #chatroom-main object Main { def apply(): Behavior[NotUsed] = Behaviors.setup { context => @@ -234,7 +234,7 @@ object IntroSpec { } } - //#chatroom-main + // #chatroom-main } @@ -244,7 +244,7 @@ class IntroSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC "Intro sample" must { "say hello" in { - //#hello-world + // #hello-world val system: ActorSystem[HelloWorldMain.SayHello] = ActorSystem(HelloWorldMain(), "hello") @@ -252,7 +252,7 @@ class IntroSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC system ! HelloWorldMain.SayHello("World") system ! 
HelloWorldMain.SayHello("Akka") - //#hello-world + // #hello-world Thread.sleep(500) // it will not fail if too short ActorTestKit.shutdown(system) diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala index c6a2b269ca..d0d0f9eca4 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala @@ -26,14 +26,14 @@ object LoggingDocExamples { def howToUse(): Unit = { - //#context-log + // #context-log Behaviors.receive[String] { (context, message) => context.log.info("Received message: {}", message) Behaviors.same } - //#context-log + // #context-log - //#logger-name + // #logger-name Behaviors.setup[String] { context => context.setLoggerName("com.myservice.BackendManager") context.log.info("Starting up") @@ -43,9 +43,9 @@ object LoggingDocExamples { Behaviors.same } } - //#logger-name + // #logger-name - //#logger-factory + // #logger-factory val log = LoggerFactory.getLogger("com.myservice.BackendTask") Future { @@ -55,21 +55,21 @@ object LoggingDocExamples { case Success(result) => log.info("Task completed: {}", result) case Failure(exc) => log.error("Task failed", exc) } - //#logger-factory + // #logger-factory } def placeholders(): Unit = { - //#info2 + // #info2 import akka.actor.typed.scaladsl.LoggerOps Behaviors.receive[String] { (context, message) => context.log.info2("{} received message: {}", context.self.path.name, message) Behaviors.same } - //#info2 + // #info2 - //#infoN + // #infoN import akka.actor.typed.scaladsl.LoggerOps Behaviors.receive[String] { (context, message) => @@ -80,23 +80,23 @@ object LoggingDocExamples { message.take(10)) Behaviors.same } - //#infoN + // #infoN } def logMessages(): Unit = { - //#logMessages + // #logMessages import akka.actor.typed.LogOptions import org.slf4j.event.Level Behaviors.logMessages(LogOptions().withLevel(Level.TRACE), BackendManager()) - //#logMessages + // #logMessages } def withMdc(): Unit = { val system: ActorSystem[_] = ??? - //#withMdc + // #withMdc val staticMdc = Map("startTime" -> system.startTime.toString) Behaviors.withMdc[BackendManager.Command]( staticMdc, @@ -104,7 +104,7 @@ object LoggingDocExamples { (msg: BackendManager.Command) => Map("identifier" -> msg.identifier, "upTime" -> system.uptime.toString)) { BackendManager() } - //#withMdc + // #withMdc } def logging(): Unit = { @@ -112,18 +112,18 @@ object LoggingDocExamples { final case class Message(s: String) val ref: ActorRef[Message] = ??? - //#test-logging + // #test-logging import akka.actor.testkit.typed.scaladsl.LoggingTestKit // implicit ActorSystem is needed, but that is given by ScalaTestWithActorTestKit - //implicit val system: ActorSystem[_] + // implicit val system: ActorSystem[_] LoggingTestKit.info("Received message").expect { ref ! Message("hello") } - //#test-logging + // #test-logging - //#test-logging-criteria + // #test-logging-criteria LoggingTestKit .error[IllegalArgumentException] .withMessageRegex(".*was rejected.*expecting ascii input.*") @@ -138,15 +138,15 @@ object LoggingDocExamples { ref ! Message("hellö") ref ! 
Message("hejdå") } - //#test-logging-criteria + // #test-logging-criteria } def tagsExample(): Unit = { Behaviors.setup[AnyRef] { context => val myBehavior = Behaviors.empty[AnyRef] - //#tags + // #tags context.spawn(myBehavior, "MyActor", ActorTags("processing")) - //#tags + // #tags Behaviors.stopped } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala index 656f3374da..7d31984cbb 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala @@ -22,17 +22,17 @@ import org.scalatest.wordspec.AnyWordSpecLike object OOIntroSpec { - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior object ChatRoom { - //#chatroom-behavior + // #chatroom-behavior sealed trait RoomCommand final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) extends RoomCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior private final case class PublishSessionMessage(screenName: String, message: String) extends RoomCommand - //#chatroom-behavior - //#chatroom-protocol + // #chatroom-behavior + // #chatroom-protocol sealed trait SessionEvent final case class SessionGranted(handle: ActorRef[PostMessage]) extends SessionEvent @@ -42,8 +42,8 @@ object OOIntroSpec { sealed trait SessionCommand final case class PostMessage(message: String) extends SessionCommand private final case class NotifyClient(message: MessagePosted) extends SessionCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior def apply(): Behavior[RoomCommand] = Behaviors.setup(context => new ChatRoomBehavior(context)) @@ -96,12 +96,12 @@ object OOIntroSpec { Behaviors.same } } - //#chatroom-protocol + // #chatroom-protocol } - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior - //#chatroom-gabbler + // #chatroom-gabbler object Gabbler { import ChatRoom._ @@ -119,10 +119,10 @@ object OOIntroSpec { Behaviors.stopped } } - //#chatroom-gabbler + // #chatroom-gabbler } - //#chatroom-main + // #chatroom-main object Main { def apply(): Behavior[NotUsed] = Behaviors.setup { context => @@ -142,7 +142,7 @@ object OOIntroSpec { } } - //#chatroom-main + // #chatroom-main } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala index 55cf8cc417..c9002d352a 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala @@ -33,7 +33,7 @@ object RouterSpec { // #routee - //intentionally out of the routee section + // intentionally out of the routee section class DoBroadcastLog(text: String) extends Worker.DoLog(text) object DoBroadcastLog { def apply(text: String) = new DoBroadcastLog(text) @@ -93,17 +93,17 @@ class RouterSpec extends ScalaTestWithActorTestKit("akka.loglevel=warning") with val alternativeRouter = ctx.spawn(alternativePool, "alternative-pool") alternativeRouter ! 
Worker.DoLog("msg") - //#pool + // #pool // #broadcast val poolWithBroadcast = pool.withBroadcastPredicate(_.isInstanceOf[DoBroadcastLog]) val routerWithBroadcast = ctx.spawn(poolWithBroadcast, "pool-with-broadcast") - //this will be sent to all 4 routees + // this will be sent to all 4 routees routerWithBroadcast ! DoBroadcastLog("msg") Behaviors.empty // #broadcast } - //#pool + // #pool ) probe.receiveMessages(15) @@ -164,17 +164,17 @@ class RouterSpec extends ScalaTestWithActorTestKit("akka.loglevel=warning") with } } - //registering proxies + // registering proxies val proxy1 = spawn(Proxy(probe1.ref)) val proxy2 = spawn(Proxy(probe2.ref)) val waiterProbe = createTestProbe[Receptionist.Registered]() system.receptionist ! Receptionist.Register(Proxy.RegisteringKey, proxy1, waiterProbe.ref) system.receptionist ! Receptionist.Register(Proxy.RegisteringKey, proxy2, waiterProbe.ref) - //wait until both registrations get Receptionist.Registered + // wait until both registrations get Receptionist.Registered waiterProbe.receiveMessages(2) - //messages sent to a router with consistent hashing + // messages sent to a router with consistent hashing // #consistent-hashing val router = spawn(Routers.group(Proxy.RegisteringKey).withConsistentHashingRouting(10, Proxy.mapping)) @@ -185,8 +185,8 @@ class RouterSpec extends ScalaTestWithActorTestKit("akka.loglevel=warning") with router ! Proxy.Message("zh3", "Text4") // the hash is calculated over the Proxy.Message first parameter obtained through the Proxy.mapping function // #consistent-hashing - //Then messages with equal Message.id reach the same actor - //so the first message in each probe queue is equal to its second + // Then messages with equal Message.id reach the same actor + // so the first message in each probe queue is equal to its second probe1.receiveMessage() shouldBe probe1.receiveMessage() probe2.receiveMessage() shouldBe probe2.receiveMessage() diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala index 1ed7e41323..03429531bd 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala @@ -34,7 +34,7 @@ object SpawnProtocolDocSpec { // Silent because we want to name the unused 'context' parameter @nowarn("msg=never used") - //#main + // #main object HelloWorldMain { def apply(): Behavior[SpawnProtocol.Command] = Behaviors.setup { context => @@ -44,7 +44,7 @@ object SpawnProtocolDocSpec { SpawnProtocol() } } - //#main + // #main } class SpawnProtocolDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing { @@ -53,7 +53,7 @@ class SpawnProtocolDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLik "ActorSystem with SpawnProtocol" must { "be able to spawn actors" in { - //#system-spawn + // #system-spawn implicit val system: ActorSystem[SpawnProtocol.Command] = ActorSystem(HelloWorldMain(), "hello") @@ -78,7 +78,7 @@ class SpawnProtocolDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLik greeterRef ! 
HelloWorld.Greet("Akka", replyToRef) } - //#system-spawn + // #system-spawn Thread.sleep(500) // it will not fail if too short ActorTestKit.shutdown(system) diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala index 15092ec4bb..2bee8f5c71 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala @@ -31,15 +31,15 @@ object StyleGuideDocExamples { object FunctionalStyle { - //#fun-style + // #fun-style - //#messages + // #messages object Counter { sealed trait Command case object Increment extends Command final case class GetValue(replyTo: ActorRef[Value]) extends Command final case class Value(n: Int) - //#messages + // #messages def apply(): Behavior[Command] = counter(0) @@ -56,16 +56,16 @@ object StyleGuideDocExamples { Behaviors.same } } - //#messages + // #messages } - //#messages - //#fun-style + // #messages + // #fun-style } object OOStyle { - //#oo-style + // #oo-style object Counter { sealed trait Command @@ -95,7 +95,7 @@ object StyleGuideDocExamples { } } } - //#oo-style + // #oo-style } @@ -270,7 +270,7 @@ object StyleGuideDocExamples { } object FactoryMethod { - //#behavior-factory-method + // #behavior-factory-method object CountDown { sealed trait Command case object Down extends Command @@ -284,7 +284,7 @@ object StyleGuideDocExamples { import CountDown._ private def counter(remaining: Int): Behavior[Command] = { - //#exhastivness-check + // #exhastivness-check Behaviors.receiveMessage { case Down => if (remaining == 1) { @@ -293,28 +293,28 @@ object StyleGuideDocExamples { } else counter(remaining - 1) } - //#exhastivness-check + // #exhastivness-check } } - //#behavior-factory-method + // #behavior-factory-method object Usage { val context: ActorContext[_] = ??? val doneRef: ActorRef[Done] = ??? - //#behavior-factory-method-spawn + // #behavior-factory-method-spawn val countDown = context.spawn(CountDown(100, doneRef), "countDown") - //#behavior-factory-method-spawn + // #behavior-factory-method-spawn - //#message-prefix-in-tell + // #message-prefix-in-tell countDown ! CountDown.Down - //#message-prefix-in-tell + // #message-prefix-in-tell } } object Messages { - //#message-protocol + // #message-protocol object CounterProtocol { sealed trait Command @@ -325,11 +325,11 @@ object StyleGuideDocExamples { case object Confirmed extends OperationResult final case class Rejected(reason: String) extends OperationResult } - //#message-protocol + // #message-protocol } object PublicVsPrivateMessages1 { - //#public-private-messages-1 + // #public-private-messages-1 object Counter { sealed trait Command case object Increment extends Command @@ -366,11 +366,11 @@ object StyleGuideDocExamples { Behaviors.same } } - //#public-private-messages-1 + // #public-private-messages-1 } object PublicVsPrivateMessages2 { - //#public-private-messages-2 + // #public-private-messages-2 // above example is preferred, but this is possible and not wrong object Counter { // The type of all public and private messages the Counter actor handles @@ -417,7 +417,7 @@ object StyleGuideDocExamples { Behaviors.same } } - //#public-private-messages-2 + // #public-private-messages-2 } object Ask { @@ -425,7 +425,7 @@ object StyleGuideDocExamples { implicit val system: ActorSystem[Nothing] = ??? 
- //#ask-1 + // #ask-1 import akka.actor.typed.scaladsl.AskPattern._ import akka.util.Timeout @@ -433,11 +433,11 @@ object StyleGuideDocExamples { val counter: ActorRef[Command] = ??? val result: Future[OperationResult] = counter.ask(replyTo => Increment(delta = 2, replyTo)) - //#ask-1 + // #ask-1 - //#ask-2 + // #ask-2 val result2: Future[OperationResult] = counter.ask(Increment(delta = 2, _)) - //#ask-2 + // #ask-2 /* //#ask-3 @@ -446,26 +446,26 @@ object StyleGuideDocExamples { //#ask-3 */ - //#ask-4 + // #ask-4 val result3: Future[OperationResult] = counter ? (Increment(delta = 2, _)) - //#ask-4 + // #ask-4 } object ExhaustivenessCheck { object CountDown { - //#messages-sealed + // #messages-sealed sealed trait Command case object Down extends Command final case class GetValue(replyTo: ActorRef[Value]) extends Command final case class Value(n: Int) - //#messages-sealed + // #messages-sealed } class CountDown() { import CountDown._ - //#pattern-match-unhandled + // #pattern-match-unhandled val zero: Behavior[Command] = { Behaviors.receiveMessage { case GetValue(replyTo) => @@ -475,11 +475,11 @@ object StyleGuideDocExamples { Behaviors.unhandled } } - //#pattern-match-unhandled + // #pattern-match-unhandled @nowarn object partial { - //#pattern-match-partial + // #pattern-match-partial val zero: Behavior[Command] = { Behaviors.receiveMessagePartial { case GetValue(replyTo) => @@ -487,7 +487,7 @@ object StyleGuideDocExamples { Behaviors.same } } - //#pattern-match-partial + // #pattern-match-partial } } @@ -495,22 +495,22 @@ object StyleGuideDocExamples { object BehaviorCompositionWithPartialFunction { - //#messages-sealed-composition + // #messages-sealed-composition sealed trait Command case object Down extends Command final case class GetValue(replyTo: ActorRef[Value]) extends Command final case class Value(n: Int) - //#messages-sealed-composition + // #messages-sealed-composition - //#get-handler-partial + // #get-handler-partial def getHandler(value: Int): PartialFunction[Command, Behavior[Command]] = { case GetValue(replyTo) => replyTo ! Value(value) Behaviors.same } - //#get-handler-partial + // #get-handler-partial - //#set-handler-non-zero-partial + // #set-handler-non-zero-partial def setHandlerNotZero(value: Int): PartialFunction[Command, Behavior[Command]] = { case Down => if (value == 1) @@ -518,17 +518,17 @@ object StyleGuideDocExamples { else nonZero(value - 1) } - //#set-handler-non-zero-partial + // #set-handler-non-zero-partial - //#set-handler-zero-partial + // #set-handler-zero-partial def setHandlerZero(log: Logger): PartialFunction[Command, Behavior[Command]] = { case Down => log.error("Counter is already at zero!") Behaviors.same } - //#set-handler-zero-partial + // #set-handler-zero-partial - //#top-level-behaviors-partial + // #top-level-behaviors-partial val zero: Behavior[Command] = Behaviors.setup { context => Behaviors.receiveMessagePartial(getHandler(0).orElse(setHandlerZero(context.log))) } @@ -538,13 +538,13 @@ object StyleGuideDocExamples { // Default Initial Behavior for this actor def apply(initialCapacity: Int): Behavior[Command] = nonZero(initialCapacity) - //#top-level-behaviors-partial + // #top-level-behaviors-partial } object NestingSample1 { sealed trait Command - //#nesting + // #nesting def apply(): Behavior[Command] = Behaviors.setup[Command](context => Behaviors.withStash(100)(stash => @@ -552,19 +552,19 @@ object StyleGuideDocExamples { context.log.debug("Starting up") // behavior using context, stash and timers ... 
- //#nesting + // #nesting timers.isTimerActive("aa") stash.isEmpty Behaviors.empty - //#nesting + // #nesting })) - //#nesting + // #nesting } object NestingSample2 { sealed trait Command - //#nesting-supervise + // #nesting-supervise def apply(): Behavior[Command] = Behaviors.setup { context => // only run on initial actor start, not on crash-restart @@ -575,13 +575,13 @@ object StyleGuideDocExamples { // every time the actor crashes and restarts a new stash is created (previous stash is lost) context.log.debug("Starting up with stash") // Behaviors.receiveMessage { ... } - //#nesting-supervise + // #nesting-supervise stash.isEmpty Behaviors.empty - //#nesting-supervise + // #nesting-supervise }) .onFailure[RuntimeException](SupervisorStrategy.restart) } - //#nesting-supervise + // #nesting-supervise } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala index 04d370dcbd..267ef5cef9 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala @@ -26,7 +26,7 @@ object ClassicWatchingTypedSpec { def props() = classic.Props(new Classic) } - //#classic-watch + // #classic-watch class Classic extends classic.Actor with ActorLogging { // context.spawn is an implicit extension method val second: ActorRef[Typed.Command] = @@ -51,9 +51,9 @@ object ClassicWatchingTypedSpec { context.stop(self) } } - //#classic-watch + // #classic-watch - //#typed + // #typed object Typed { sealed trait Command final case class Ping(replyTo: ActorRef[Pong.type]) extends Command @@ -70,7 +70,7 @@ object ClassicWatchingTypedSpec { } } } - //#typed + // #typed } class ClassicWatchingTypedSpec extends AnyWordSpec with LogCapturing { @@ -80,9 +80,9 @@ class ClassicWatchingTypedSpec extends AnyWordSpec with LogCapturing { "Classic -> Typed" must { "support creating, watching and messaging" in { val system = classic.ActorSystem("Coexistence") - //#create-classic + // #create-classic val classicActor = system.actorOf(Classic.props()) - //#create-classic + // #create-classic val probe = TestProbe()(system) probe.watch(classicActor) probe.expectTerminated(classicActor, 200.millis) @@ -90,11 +90,11 @@ class ClassicWatchingTypedSpec extends AnyWordSpec with LogCapturing { } "support converting a classic actor system to an actor system" in { - //#convert-classic + // #convert-classic val system = akka.actor.ActorSystem("ClassicToTypedSystem") val typedSystem: ActorSystem[Nothing] = system.toTyped - //#convert-classic + // #convert-classic typedSystem.scheduler // remove compile warning TestKit.shutdownActorSystem(system) } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala index 990250b4d4..bb207dc425 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala @@ -21,7 +21,7 @@ import scala.concurrent.duration._ object TypedWatchingClassicSpec { - //#typed + // #typed object Typed { final case class Ping(replyTo: akka.actor.typed.ActorRef[Pong.type]) sealed trait Command @@ -52,9 +52,9 @@ object 
TypedWatchingClassicSpec { } } } - //#typed + // #typed - //#classic + // #classic object Classic { def props(): classic.Props = classic.Props(new Classic) } @@ -64,7 +64,7 @@ object TypedWatchingClassicSpec { replyTo ! Typed.Pong } } - //#classic + // #classic } class TypedWatchingClassicSpec extends AnyWordSpec with LogCapturing { @@ -73,10 +73,10 @@ class TypedWatchingClassicSpec extends AnyWordSpec with LogCapturing { "Typed -> Classic" must { "support creating, watching and messaging" in { - //#create + // #create val system = classic.ActorSystem("TypedWatchingClassic") val typed = system.spawn(Typed.behavior, "Typed") - //#create + // #create val probe = TestProbe()(system) probe.watch(typed.toClassic) probe.expectTerminated(typed.toClassic, 200.millis) diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala index aefcf2a8ec..c0c85f6604 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala @@ -51,10 +51,10 @@ object ExtensionDocSpec { val initialBehavior: Behavior[Any] = Behaviors.empty[Any] - //#usage + // #usage Behaviors.setup[Any] { ctx => DatabasePool(ctx.system).connection().executeQuery("insert into...") initialBehavior } - //#usage + // #usage } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala index 2831c1e32f..17607da53b 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala @@ -13,7 +13,7 @@ import akka.actor.Props object ClassicSample { - //#hello-world-actor + // #hello-world-actor object HelloWorld { final case class Greet(whom: String) final case class Greeted(whom: String) @@ -27,11 +27,11 @@ object ClassicSample { override def receive: Receive = { case Greet(whom) => - //#fiddle_code + // #fiddle_code log.info("Hello {}!", whom) sender() ! 
Greeted(whom) } } - //#hello-world-actor + // #hello-world-actor } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala index 7387eeb8d6..50a86e9664 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala @@ -15,7 +15,7 @@ import akka.actor.typed.scaladsl.Behaviors object TypedSample { - //#hello-world-actor + // #hello-world-actor object HelloWorld { final case class Greet(whom: String, replyTo: ActorRef[Greeted]) final case class Greeted(whom: String, from: ActorRef[Greet]) @@ -33,9 +33,9 @@ object TypedSample { this } } - //#hello-world-actor + // #hello-world-actor - //#children + // #children object Parent { sealed trait Command case class DelegateToChild(name: String, message: Child.Command) extends Command @@ -66,7 +66,7 @@ object TypedSample { updated(Map.empty) } } - //#children + // #children object Child { sealed trait Command diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala index 9f1b904a5a..384654087e 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala @@ -17,37 +17,37 @@ object SupervisionCompileOnly { val behavior = Behaviors.empty[String] - //#restart + // #restart Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.restart) - //#restart + // #restart - //#resume + // #resume Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.resume) - //#resume + // #resume - //#restart-limit + // #restart-limit Behaviors .supervise(behavior) .onFailure[IllegalStateException]( SupervisorStrategy.restart.withLimit(maxNrOfRetries = 10, withinTimeRange = 10.seconds)) - //#restart-limit + // #restart-limit - //#multiple + // #multiple Behaviors .supervise(Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.restart)) .onFailure[IllegalArgumentException](SupervisorStrategy.stop) - //#multiple + // #multiple - //#wrap + // #wrap object Counter { sealed trait Command case class Increment(nr: Int) extends Command case class GetCount(replyTo: ActorRef[Int]) extends Command - //#top-level + // #top-level def apply(): Behavior[Command] = Behaviors.supervise(counter(1)).onFailure(SupervisorStrategy.restart) - //#top-level + // #top-level private def counter(count: Int): Behavior[Command] = Behaviors.receiveMessage[Command] { @@ -58,9 +58,9 @@ object SupervisionCompileOnly { Behaviors.same } } - //#wrap + // #wrap - //#restart-stop-children + // #restart-stop-children def child(size: Long): Behavior[String] = Behaviors.receiveMessage(msg => child(size + msg.length)) @@ -82,9 +82,9 @@ object SupervisionCompileOnly { } .onFailure(SupervisorStrategy.restart) } - //#restart-stop-children + // #restart-stop-children - //#restart-keep-children + // #restart-keep-children def parent2: Behavior[String] = { Behaviors.setup { ctx => val child1 = ctx.spawn(child(0), "child1") @@ -104,7 +104,7 @@ object SupervisionCompileOnly { .onFailure(SupervisorStrategy.restart.withStopChildren(false)) } } - //#restart-keep-children + // #restart-keep-children trait Resource 
{ def close(): Unit @@ -113,7 +113,7 @@ object SupervisionCompileOnly { def claimResource(): Resource = ??? @nowarn("msg=never used") - //#restart-PreRestart-signal + // #restart-PreRestart-signal def withPreRestart: Behavior[String] = { Behaviors .supervise[String] { @@ -138,5 +138,5 @@ object SupervisionCompileOnly { .onFailure[Exception](SupervisorStrategy.restart) } - //#restart-PreRestart-signal + // #restart-PreRestart-signal } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala index 757389d689..b86929b1df 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala @@ -11,7 +11,7 @@ import scala.concurrent.{ ExecutionContextExecutor, Future } import com.typesafe.config.{ Config, ConfigFactory } import org.slf4j.Logger -import akka.{ Done, actor => classic } +import akka.{ actor => classic, Done } import akka.actor.{ Address, BootstrapSetup, ClassicActorSystemProvider } import akka.actor.setup.ActorSystemSetup import akka.actor.typed.eventstream.EventStream diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala index e0f4eb6b87..6efee5a7d1 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala @@ -133,7 +133,6 @@ object Behavior { * The `ClassTag` for `Outer` ensures that only messages of this class or a subclass thereof will be * intercepted. Other message types (e.g. a private protocol) will bypass * the interceptor and be continue to the inner behavior untouched. - * */ def transformMessages[Outer: ClassTag](matcher: PartialFunction[Outer, Inner]): Behavior[Outer] = BehaviorImpl.transformMessages(behavior, matcher) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala index 3bd59a6be8..7e1e687282 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala @@ -39,7 +39,6 @@ trait Scheduler { * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` or `ActorContext.scheduleOnce` should be preferred. - * */ def scheduleOnce(delay: java.time.Duration, runnable: Runnable, executor: ExecutionContext): Cancellable @@ -62,7 +61,6 @@ trait Scheduler { * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` should be preferred. - * */ def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)( implicit executor: ExecutionContext): Cancellable @@ -124,7 +122,6 @@ trait Scheduler { * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` should be preferred. 
- * */ def scheduleAtFixedRate(initialDelay: FiniteDuration, interval: FiniteDuration)(runnable: Runnable)( implicit executor: ExecutionContext): Cancellable diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala index 1b7bfd4a5a..9a2ce8f60c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala @@ -258,8 +258,7 @@ object ProducerControllerImpl { settings: ProducerController.Settings, initialState: Option[DurableProducerQueue.State[A]])( thenBecomeActive: ( - ActorRef[RequestNext[A]], - ActorRef[ConsumerController.Command[A]], + ActorRef[RequestNext[A]], ActorRef[ConsumerController.Command[A]], DurableProducerQueue.State[A]) => Behavior[InternalCommand]): Behavior[InternalCommand] = { Behaviors.receiveMessagePartial[InternalCommand] { case RegisterConsumer(c: ActorRef[ConsumerController.Command[A]] @unchecked) => @@ -346,7 +345,8 @@ object ProducerControllerImpl { val manifest = Serializers.manifestFor(ser, mAnyRef) val serializerId = ser.identifier if (bytes.length <= chunkSize) { - ChunkedMessage(ByteString.fromArrayUnsafe(bytes), firstChunk = true, lastChunk = true, serializerId, manifest) :: Nil + ChunkedMessage(ByteString.fromArrayUnsafe(bytes), firstChunk = true, lastChunk = true, serializerId, + manifest) :: Nil } else { val builder = Vector.newBuilder[ChunkedMessage] val chunksIter = ByteString.fromArrayUnsafe(bytes).grouped(chunkSize) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala index c85710600b..0012d31b8d 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala @@ -373,7 +373,7 @@ private class WorkPullingProducerControllerImpl[A: ClassTag]( false } else { throw new IllegalStateException(s"Invalid combination of hasRequested [${s.requested}], " + - s"wasStashed [$wasStashed], hasMoreDemand [$hasMoreDemand], stashBuffer.isEmpty [${stashBuffer.isEmpty}]") + s"wasStashed [$wasStashed], hasMoreDemand [$hasMoreDemand], stashBuffer.isEmpty [${stashBuffer.isEmpty}]") } s.copy(out = newOut, requested = newRequested, preselectedWorkers = s.preselectedWorkers - totalSeqNr) @@ -443,7 +443,8 @@ private class WorkPullingProducerControllerImpl[A: ClassTag]( currentSeqNr = s.currentSeqNr + 1, preselectedWorkers = s.preselectedWorkers.updated(s.currentSeqNr, PreselectedWorker(outKey, out.confirmationQualifier)), - handOver = s.handOver.updated(s.currentSeqNr, HandOver(resend.oldConfirmationQualifier, resend.oldSeqNr)))) + handOver = + s.handOver.updated(s.currentSeqNr, HandOver(resend.oldConfirmationQualifier, resend.oldSeqNr)))) case None => checkStashFull(stashBuffer) // no demand from any workers, or all already preselected diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala index 491fc0d9d2..731e546012 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala +++ 
b/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala @@ -43,7 +43,6 @@ object EventStream { * def subscribe(actorSystem: ActorSystem[_], actorRef: ActorRef[A]) = * actorSystem.eventStream ! EventStream.Subscribe[A1](actorRef) * }}} - * */ final case class Subscribe[E](subscriber: ActorRef[E])(implicit classTag: ClassTag[E]) extends Command { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala index 36f8c79a1e..1cc966cc04 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala @@ -218,7 +218,7 @@ import scala.util.Success override def ask[Req, Res](target: RecipientRef[Req], createRequest: ActorRef[Res] => Req)( mapResponse: Try[Res] => T)(implicit responseTimeout: Timeout, classTag: ClassTag[Res]): Unit = { import akka.actor.typed.scaladsl.AskPattern._ - pipeToSelf((target.ask(createRequest))(responseTimeout, system.scheduler))(mapResponse) + pipeToSelf(target.ask(createRequest)(responseTimeout, system.scheduler))(mapResponse) } override def askWithStatus[Req, Res](target: RecipientRef[Req], createRequest: ActorRef[StatusReply[Res]] => Req)( diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala index a9ebe68d09..407ad6804b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala @@ -100,15 +100,15 @@ private[akka] trait ExtensionsImpl extends Extensions { self: ActorSystem[_] wit } } catch { case t: Throwable => - //In case shit hits the fan, remove the inProcess signal and escalate to caller + // In case shit hits the fan, remove the inProcess signal and escalate to caller extensions.replace(ext, inProcessOfRegistration, t) throw t } finally { - //Always notify listeners of the inProcess signal + // Always notify listeners of the inProcess signal inProcessOfRegistration.countDown() } case _ => - //Someone else is in process of registering an extension for this Extension, retry + // Someone else is in process of registering an extension for this Extension, retry registerExtension(ext) } } @@ -119,10 +119,10 @@ private[akka] trait ExtensionsImpl extends Extensions { self: ActorSystem[_] wit @tailrec private def findExtension[T <: Extension](ext: ExtensionId[T]): T = extensions.get(ext) match { case c: CountDownLatch => - //Registration in process, await completion and retry + // Registration in process, await completion and retry c.await() findExtension(ext) - case t: Throwable => throw t //Initialization failed, throw same again - case other => other.asInstanceOf[T] //could be a T or null, in which case we return the null as T + case t: Throwable => throw t // Initialization failed, throw same again + case other => other.asInstanceOf[T] // could be a T or null, in which case we return the null as T } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala index 658ff63713..dd7540e377 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala @@ 
-95,7 +95,8 @@ private[akka] final class InterceptorImpl[O, I]( private def deduplicate(interceptedResult: Behavior[I], ctx: TypedActorContext[O]): Behavior[O] = { val started = Behavior.start(interceptedResult, ctx.asInstanceOf[TypedActorContext[I]]) - if (started == BehaviorImpl.UnhandledBehavior || started == BehaviorImpl.SameBehavior || !Behavior.isAlive(started)) { + if (started == BehaviorImpl.UnhandledBehavior || started == BehaviorImpl.SameBehavior || !Behavior.isAlive( + started)) { started.unsafeCast[O] } else { // returned behavior could be nested in setups, so we need to start before we deduplicate diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala index 1e088e59ee..b2036560a9 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala @@ -186,14 +186,15 @@ import java.util.function.Predicate else { val node = messages.next() val message = wrap(node.message) - val interpretResult = try { - message match { - case sig: Signal => Behavior.interpretSignal(b2, ctx, sig) - case msg => interpretUnstashedMessage(b2, ctx, msg, node) + val interpretResult = + try { + message match { + case sig: Signal => Behavior.interpretSignal(b2, ctx, sig) + case msg => interpretUnstashedMessage(b2, ctx, msg, node) + } + } catch { + case NonFatal(e) => throw UnstashException(e, b2) } - } catch { - case NonFatal(e) => throw UnstashException(e, b2) - } val actualNext = if (interpretResult == BehaviorImpl.same) b2 diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala index 46d3ac9355..5c463bf0ea 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala @@ -34,7 +34,7 @@ import scala.util.Try @InternalApi private[akka] object Supervisor { def apply[T, Thr <: Throwable: ClassTag](initialBehavior: Behavior[T], strategy: SupervisorStrategy): Behavior[T] = { if (initialBehavior.isInstanceOf[scaladsl.AbstractBehavior[_]] || initialBehavior - .isInstanceOf[javadsl.AbstractBehavior[_]]) { + .isInstanceOf[javadsl.AbstractBehavior[_]]) { throw new IllegalArgumentException( "The supervised Behavior must not be a AbstractBehavior instance directly," + "because a different instance should be created when it is restarted. 
Wrap in Behaviors.setup.") @@ -303,18 +303,20 @@ private class RestartSupervisor[T, Thr <: Throwable: ClassTag](initial: Behavior override protected def handleSignalException( ctx: TypedActorContext[Any], target: SignalTarget[T]): Catcher[Behavior[T]] = { - handleException(ctx, signalRestart = { - case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) - case _ => target(ctx, PreRestart) - }) + handleException(ctx, + signalRestart = { + case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) + case _ => target(ctx, PreRestart) + }) } override protected def handleReceiveException( ctx: TypedActorContext[Any], target: ReceiveTarget[T]): Catcher[Behavior[T]] = { - handleException(ctx, signalRestart = { - case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) - case _ => target.signalRestart(ctx) - }) + handleException(ctx, + signalRestart = { + case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) + case _ => target.signalRestart(ctx) + }) } private def handleException(ctx: TypedActorContext[Any], signalRestart: Throwable => Unit): Catcher[Behavior[T]] = { @@ -391,10 +393,11 @@ private class RestartSupervisor[T, Thr <: Throwable: ClassTag](initial: Behavior case _ => newBehavior } nextBehavior.narrow - } catch handleException(ctx, signalRestart = { - case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) - case _ => () - }) + } catch handleException(ctx, + signalRestart = { + case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) + case _ => () + }) } private def stopChildren(ctx: TypedActorContext[_], children: Set[ActorRef[Nothing]]): Unit = { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala index 85ad125aba..daaf3bab61 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala @@ -33,7 +33,6 @@ private[typed] object SystemMessageList { } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -45,7 +44,6 @@ private[typed] object SystemMessageList { * * The type of the list also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. - * */ private[typed] class LatestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ @@ -94,7 +92,6 @@ private[typed] class LatestFirstSystemMessageList(val head: SystemMessage) exten } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -106,7 +103,6 @@ private[typed] class LatestFirstSystemMessageList(val head: SystemMessage) exten * * This list type also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. 
- * */ private[typed] class EarliestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala index f1035688b3..3c2ca13ba3 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala @@ -69,7 +69,7 @@ import scala.concurrent.duration.FiniteDuration startTimerAtFixedRate(key, msg, initialDelay.asScala, interval.asScala) override final def startPeriodicTimer(key: Any, msg: T, interval: Duration): Unit = { - //this follows the deprecation note in the super class + // this follows the deprecation note in the super class startTimerWithFixedDelay(key, msg, interval.asScala) } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala index d6011b65fd..8904863b56 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala @@ -118,7 +118,7 @@ import akka.util.OptionVal if (c.hasTimer) { msg match { case timerMsg: TimerMsg => - //we can only get this kind of message if the timer is of this concrete class + // we can only get this kind of message if the timer is of this concrete class c.timer.asInstanceOf[TimerSchedulerImpl[T]].interceptTimerMsg(ctx.log, timerMsg) match { case OptionVal.Some(m) => next(Behavior.interpretMessage(behavior, c, m), m) @@ -187,17 +187,18 @@ import akka.util.OptionVal private def withSafelyAdapted[U, V](adapt: () => U)(body: U => V): Unit = { var failed = false - val adapted: U = try { - adapt() - } catch { - case NonFatal(ex) => - // pass it on through the signal handler chain giving supervision a chance to deal with it - handleSignal(MessageAdaptionFailure(ex)) - // Signal handler should actually throw so this is mostly to keep compiler happy (although a user could override - // the MessageAdaptionFailure handling to do something weird) - failed = true - null.asInstanceOf[U] - } + val adapted: U = + try { + adapt() + } catch { + case NonFatal(ex) => + // pass it on through the signal handler chain giving supervision a chance to deal with it + handleSignal(MessageAdaptionFailure(ex)) + // Signal handler should actually throw so this is mostly to keep compiler happy (although a user could override + // the MessageAdaptionFailure handling to do something weird) + failed = true + null.asInstanceOf[U] + } if (!failed) { if (adapted != null) body(adapted) else @@ -222,33 +223,34 @@ import akka.util.OptionVal case ex => ctx.setCurrentActorThread() try ex match { - case TypedActorFailedException(cause) => - // These have already been optionally logged by typed supervision - recordChildFailure(cause) - classic.SupervisorStrategy.Stop - case _ => - val isTypedActor = sender() match { - case afwc: ActorRefWithCell => - afwc.underlying.props.producer.actorClass == classOf[ActorAdapter[_]] - case _ => - false - } - recordChildFailure(ex) - val logMessage = ex match { - case e: ActorInitializationException if e.getCause ne null => - e.getCause match { - case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage - case ex => ex.getMessage - } - case e => e.getMessage - } - // 
log at Error as that is what the supervision strategy would have done. - ctx.log.error(logMessage, ex) - if (isTypedActor) + case TypedActorFailedException(cause) => + // These have already been optionally logged by typed supervision + recordChildFailure(cause) classic.SupervisorStrategy.Stop - else - ActorAdapter.classicSupervisorDecider(ex) - } finally { + case _ => + val isTypedActor = sender() match { + case afwc: ActorRefWithCell => + afwc.underlying.props.producer.actorClass == classOf[ActorAdapter[_]] + case _ => + false + } + recordChildFailure(ex) + val logMessage = ex match { + case e: ActorInitializationException if e.getCause ne null => + e.getCause match { + case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage + case ex => ex.getMessage + } + case e => e.getMessage + } + // log at Error as that is what the supervision strategy would have done. + ctx.log.error(logMessage, ex) + if (isTypedActor) + classic.SupervisorStrategy.Stop + else + ActorAdapter.classicSupervisorDecider(ex) + } + finally { ctx.clearCurrentActorThread() } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala index 45993f0c19..0f1d8fe0a4 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala @@ -197,12 +197,13 @@ private[akka] object LocalReceptionist extends ReceptionistBehaviorProvider { case None => } - updateServices(Set(key), { state => - val newState = state.serviceInstanceRemoved(key)(serviceInstance) - if (state.servicesPerActor.getOrElse(serviceInstance, Set.empty).isEmpty) - ctx.unwatch(serviceInstance) - newState - }) + updateServices(Set(key), + { state => + val newState = state.serviceInstanceRemoved(key)(serviceInstance) + if (state.servicesPerActor.getOrElse(serviceInstance, Set.empty).isEmpty) + ctx.unwatch(serviceInstance) + newState + }) } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala index 4197cb7db3..ca3bc94d3b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala @@ -62,8 +62,8 @@ private[akka] object RoutingLogics { val firstDiffIndex = { var idx = 0 while (idx < currentRoutees.length && - idx < sortedNewRoutees.length && - currentRoutees(idx) == sortedNewRoutees(idx)) { + idx < sortedNewRoutees.length && + currentRoutees(idx) == sortedNewRoutees(idx)) { idx += 1 } idx diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala index b48e3d1428..4010991804 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala @@ -27,7 +27,6 @@ import akka.util.JavaDurationConverters._ * message that is sent to the target Actor in order to function as a reply-to * address, therefore the argument to the ask method is not the message itself * but a function that given the reply-to address will create the message. 
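The AskPattern scaladoc above makes the key point that the argument to `ask` is not the message itself but a function from the reply-to address to the message. A small scaladsl sketch of the same idea, with a hypothetical `Request`/`Response` protocol:

{{{
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.actor.typed.{ ActorRef, ActorSystem }
import akka.actor.typed.scaladsl.AskPattern._
import akka.util.Timeout

object AskExample {
  final case class Request(replyTo: ActorRef[Response])
  final case class Response(text: String)

  def query(target: ActorRef[Request])(implicit system: ActorSystem[_]): Unit = {
    implicit val timeout: Timeout = 3.seconds
    // not the message, but a function from the reply-to address to the message
    val answer: Future[Response] = target.ask(replyTo => Request(replyTo))
    answer.foreach(r => println(r.text))(system.executionContext)
  }
}
}}}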
- * */ object AskPattern { @@ -40,7 +39,7 @@ object AskPattern { messageFactory: JFunction[ActorRef[Res], Req], timeout: Duration, scheduler: Scheduler): CompletionStage[Res] = - (actor.ask(messageFactory.apply)(timeout.asScala, scheduler)).toJava + actor.ask(messageFactory.apply)(timeout.asScala, scheduler).toJava /** * The same as [[ask]] but only for requests that result in a response of type [[akka.pattern.StatusReply]]. @@ -53,6 +52,6 @@ object AskPattern { messageFactory: JFunction[ActorRef[StatusReply[Res]], Req], timeout: Duration, scheduler: Scheduler): CompletionStage[Res] = - (actor.askWithStatus(messageFactory.apply)(timeout.asScala, scheduler).toJava) + actor.askWithStatus(messageFactory.apply)(timeout.asScala, scheduler).toJava } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala index 43f8377ae5..ada286b3b6 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala @@ -5,7 +5,7 @@ package akka.actor.typed.javadsl import java.util.Collections -import java.util.function.{ Supplier, Function => JFunction } +import java.util.function.{ Function => JFunction, Supplier } import scala.reflect.ClassTag @@ -154,9 +154,10 @@ object Behaviors { def receive[T]( onMessage: JapiFunction2[ActorContext[T], T, Behavior[T]], onSignal: JapiFunction2[ActorContext[T], Signal, Behavior[T]]): Behavior[T] = { - new BehaviorImpl.ReceiveBehavior((ctx, msg) => onMessage.apply(ctx.asJava, msg), { - case (ctx, sig) => onSignal.apply(ctx.asJava, sig) - }) + new BehaviorImpl.ReceiveBehavior((ctx, msg) => onMessage.apply(ctx.asJava, msg), + { + case (ctx, sig) => onSignal.apply(ctx.asJava, sig) + }) } /** @@ -327,7 +328,6 @@ object Behaviors { * each message processing by the inner behavior is done. * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T]( interceptMessageClass: Class[T], @@ -344,7 +344,6 @@ object Behaviors { * @param staticMdc This MDC is setup in the logging context for every message * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T]( interceptMessageClass: Class[T], @@ -369,7 +368,6 @@ object Behaviors { * each message processing by the inner behavior is done. 
* @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T]( interceptMessageClass: Class[T], diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala index ba31fa4493..e68ee3dec0 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala @@ -28,7 +28,6 @@ abstract class Receive[T] extends ExtensibleBehavior[T] { * * returning `stopped` will terminate this Behavior * * returning `same` designates to reuse the current Behavior * * returning `unhandled` keeps the same Behavior and signals that the message was not yet handled - * */ @throws(classOf[Exception]) def receiveMessage(msg: T): Behavior[T] diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala index 103b9633b3..f54d6c6928 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala @@ -77,12 +77,14 @@ final class ReceiveBuilder[T] private ( * @return this behavior builder */ def onMessageEquals(msg: T, handler: Creator[Behavior[T]]): ReceiveBuilder[T] = - withMessage(OptionVal.Some(msg.getClass), OptionVal.Some(new JPredicate[T] { - override def test(param: T): Boolean = param == (msg) - }), new JFunction[T, Behavior[T]] { - // invoke creator without the message - override def apply(param: T): Behavior[T] = handler.create() - }) + withMessage(OptionVal.Some(msg.getClass), + OptionVal.Some(new JPredicate[T] { + override def test(param: T): Boolean = param == msg + }), + new JFunction[T, Behavior[T]] { + // invoke creator without the message + override def apply(param: T): Behavior[T] = handler.create() + }) /** * Add a new case to the message handling matching any message. 
Subsequent `onMessage` clauses will @@ -128,11 +130,13 @@ final class ReceiveBuilder[T] private ( * @return this behavior builder */ def onSignalEquals(signal: Signal, handler: Creator[Behavior[T]]): ReceiveBuilder[T] = - withSignal(signal.getClass, OptionVal.Some(new JPredicate[Signal] { - override def test(param: Signal): Boolean = param == signal - }), new JFunction[Signal, Behavior[T]] { - override def apply(param: Signal): Behavior[T] = handler.create() - }) + withSignal(signal.getClass, + OptionVal.Some(new JPredicate[Signal] { + override def test(param: Signal): Boolean = param == signal + }), + new JFunction[Signal, Behavior[T]] { + override def apply(param: Signal): Behavior[T] = handler.create() + }) private def withMessage[M <: T]( `type`: OptionVal[Class[M]], diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala index 6df2c2fe47..6cc5ee722c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala @@ -8,7 +8,7 @@ import akka.actor.typed.{ scaladsl, Behavior } import akka.annotation.DoNotInherit import akka.japi.function.Procedure -import java.util.function.{ Predicate, Function => JFunction } +import java.util.function.{ Function => JFunction, Predicate } /** * A non thread safe mutable message buffer that can be used to buffer messages inside actors diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala index a358b0a7c4..ee357720bb 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala @@ -49,7 +49,6 @@ abstract class AbstractBehavior[T](protected val context: ActorContext[T]) exten *
* returning `this` or `same` designates to reuse the current Behavior * * returning `unhandled` keeps the same Behavior and signals that the message was not yet handled * - * */ @throws(classOf[Exception]) def onMessage(msg: T): Behavior[T] @@ -87,9 +86,10 @@ abstract class AbstractBehavior[T](protected val context: ActorContext[T]) exten @throws(classOf[Exception]) override final def receiveSignal(ctx: TypedActorContext[T], msg: Signal): Behavior[T] = { checkRightContext(ctx) - onSignal.applyOrElse(msg, { - case MessageAdaptionFailure(ex) => throw ex - case _ => Behaviors.unhandled - }: PartialFunction[Signal, Behavior[T]]) + onSignal.applyOrElse(msg, + { + case MessageAdaptionFailure(ex) => throw ex + case _ => Behaviors.unhandled + }: PartialFunction[Signal, Behavior[T]]) } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala index c45bceb72f..db1ef0a62b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala @@ -250,7 +250,6 @@ object Behaviors { * each message processing by the inner behavior is done. * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T: ClassTag](mdcForMessage: T => Map[String, String])(behavior: Behavior[T]): Behavior[T] = withMdc[T](Map.empty[String, String], mdcForMessage)(behavior) @@ -265,7 +264,6 @@ * @param staticMdc This MDC is setup in the logging context for every message * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T: ClassTag](staticMdc: Map[String, String])(behavior: Behavior[T]): Behavior[T] = withMdc[T](staticMdc, (_: T) => Map.empty[String, String])(behavior) @@ -288,7 +286,6 @@ * each message processing by the inner behavior is done.
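For the `withMdc` overloads touched here, a minimal usage sketch; the `Job` message type and the MDC keys are illustrative only:

{{{
import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors

object MdcExample {
  final case class Job(id: String)

  def withJobMdc(inner: Behavior[Job]): Behavior[Job] =
    Behaviors.withMdc[Job](
      staticMdc = Map("component" -> "job-handler"),
      mdcForMessage = (job: Job) => Map("jobId" -> job.id))(inner)
}
}}}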
* @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T: ClassTag](staticMdc: Map[String, String], mdcForMessage: T => Map[String, String])( behavior: Behavior[T]): Behavior[T] = diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala index 05fafc44f4..2da3ca6193 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala @@ -21,7 +21,7 @@ package object scaladsl { * }}} * or * {{{ - *import akka.actor.typed.scaladsl._ + * import akka.actor.typed.scaladsl._ * }}} * * @param log the underlying [[org.slf4j.Logger]] diff --git a/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala b/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala index 903ad6cd4d..8ebfb31d1f 100644 --- a/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala @@ -70,12 +70,12 @@ object ByteIterator { final override def clone: ByteArrayIterator = new ByteArrayIterator(array, from, until) final override def take(n: Int): this.type = { - if (n < len) until = { if (n > 0) (from + n) else from } + if (n < len) until = { if (n > 0) from + n else from } this } final override def drop(n: Int): this.type = { - if (n > 0) from = { if (n < len) (from + n) else until } + if (n > 0) from = { if (n < len) from + n else until } this } @@ -153,7 +153,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = iterator.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xFF else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { if ((off < 0) || (len < 0) || (off + len > b.length)) throw new IndexOutOfBoundsException @@ -372,7 +372,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = current.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xFF else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { val nRead = current.asInputStream.read(b, off, len) @@ -511,9 +511,9 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getShort(implicit byteOrder: ByteOrder): Short = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next() & 0xff) << 8 | (next() & 0xff) << 0).toShort + ((next() & 0xFF) << 8 | (next() & 0xFF) << 0).toShort else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next() & 0xff) << 0 | (next() & 0xff) << 8).toShort + ((next() & 0xFF) << 0 | (next() & 0xFF) << 8).toShort else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -522,15 +522,15 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getInt(implicit byteOrder: ByteOrder): Int = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next() & 0xff) << 24 - | (next() & 0xff) << 16 - | (next() & 0xff) << 8 - | (next() & 0xff) << 0) + ((next() & 0xFF) << 24 + | (next() & 0xFF) << 16 + | (next() & 0xFF) << 8 + | (next() & 0xFF) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next() & 0xff) << 0 - | (next() & 0xff) << 8 - | (next() & 0xff) << 16 - | (next() & 0xff) << 24) + ((next() & 0xFF) << 0 + | (next() & 0xFF) << 8 + | (next() & 0xFF) << 16 + | (next() & 
0xFF) << 24) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -539,23 +539,23 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getLong(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next().toLong & 0xff) << 56 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 0) + ((next().toLong & 0xFF) << 56 + | (next().toLong & 0xFF) << 48 + | (next().toLong & 0xFF) << 40 + | (next().toLong & 0xFF) << 32 + | (next().toLong & 0xFF) << 24 + | (next().toLong & 0xFF) << 16 + | (next().toLong & 0xFF) << 8 + | (next().toLong & 0xFF) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next().toLong & 0xff) << 0 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 56) + ((next().toLong & 0xFF) << 0 + | (next().toLong & 0xFF) << 8 + | (next().toLong & 0xFF) << 16 + | (next().toLong & 0xFF) << 24 + | (next().toLong & 0xFF) << 32 + | (next().toLong & 0xFF) << 40 + | (next().toLong & 0xFF) << 48 + | (next().toLong & 0xFF) << 56) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -566,11 +566,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLongPart(n: Int)(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) { var x = 0L - (1 to n).foreach(_ => x = (x << 8) | (next() & 0xff)) + (1 to n).foreach(_ => x = (x << 8) | (next() & 0xFF)) x } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { var x = 0L - (0 until n).foreach(i => x |= (next() & 0xff) << 8 * i) + (0 until n).foreach(i => x |= (next() & 0xFF) << 8 * i) x } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } diff --git a/akka-actor/src/main/scala-2.12/akka/util/ByteString.scala b/akka-actor/src/main/scala-2.12/akka/util/ByteString.scala index ba4cdc9c22..b11e3046c6 100644 --- a/akka-actor/src/main/scala-2.12/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala-2.12/akka/util/ByteString.scala @@ -75,7 +75,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. - * */ def fromArrayUnsafe(array: Array[Byte]): ByteString = ByteString1C(array) @@ -100,7 +99,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. 
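The `fromArrayUnsafe` contract spelled out above, zero-copy wrapping versus the copying default, in a tiny sketch:

{{{
import akka.util.ByteString

object WrapExample {
  val bytes = Array[Byte](1, 2, 3)
  // zero-copy wrap: `wrapped` shares `bytes`, so the array must never be mutated afterwards
  val wrapped = ByteString.fromArrayUnsafe(bytes)
  // the default copies the array and keeps the usual immutability guarantee
  val copied = ByteString(bytes)
}
}}}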
- * */ def fromArrayUnsafe(array: Array[Byte], offset: Int, length: Int): ByteString = ByteString1(array, offset, length) @@ -304,7 +302,7 @@ object ByteString { os.write(bytes, startIndex, length) } - def isCompact: Boolean = (length == bytes.length) + def isCompact: Boolean = length == bytes.length private[akka] def byteStringCompanion = ByteString1 diff --git a/akka-actor/src/main/scala-2.12/akka/util/ccompat/package.scala b/akka-actor/src/main/scala-2.12/akka/util/ccompat/package.scala index c2c9172615..3af8da6ae3 100644 --- a/akka-actor/src/main/scala-2.12/akka/util/ccompat/package.scala +++ b/akka-actor/src/main/scala-2.12/akka/util/ccompat/package.scala @@ -5,7 +5,7 @@ package akka.util import scala.{ collection => c } -import scala.collection.{ GenTraversable, immutable => i, mutable => m } +import scala.collection.{ immutable => i, mutable => m, GenTraversable } import scala.collection.generic.{ CanBuildFrom, GenericCompanion, Sorted, SortedSetFactory } import scala.language.higherKinds import scala.language.implicitConversions @@ -49,8 +49,8 @@ package object ccompat { simpleCBF(fact.newBuilder[A]) private[akka] implicit def sortedSetCompanionToCBF[ - A: Ordering, - CC[X] <: c.SortedSet[X] with c.SortedSetLike[X, CC[X]]](fact: SortedSetFactory[CC]): CanBuildFrom[Any, A, CC[A]] = + A: Ordering, CC[X] <: c.SortedSet[X] with c.SortedSetLike[X, CC[X]]]( + fact: SortedSetFactory[CC]): CanBuildFrom[Any, A, CC[A]] = simpleCBF(fact.newBuilder[A]) private[ccompat] def build[T, CC](builder: m.Builder[T, CC], source: TraversableOnce[T]): CC = { diff --git a/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala b/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala index 88780bd50d..39ef608cdb 100644 --- a/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala @@ -72,12 +72,12 @@ object ByteIterator { final override def clone: ByteArrayIterator = new ByteArrayIterator(array, from, until) final override def take(n: Int): this.type = { - if (n < len) until = { if (n > 0) (from + n) else from } + if (n < len) until = { if (n > 0) from + n else from } this } final override def drop(n: Int): this.type = { - if (n > 0) from = { if (n < len) (from + n) else until } + if (n > 0) from = { if (n < len) from + n else until } this } @@ -165,7 +165,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = iterator.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xFF else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { if ((off < 0) || (len < 0) || (off + len > b.length)) throw new IndexOutOfBoundsException @@ -386,7 +386,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = current.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xFF else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { val nRead = current.asInputStream.read(b, off, len) @@ -527,9 +527,9 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getShort(implicit byteOrder: ByteOrder): Short = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next() & 0xff) << 8 | (next() & 0xff) << 0).toShort + ((next() & 0xFF) << 8 | (next() & 0xFF) << 0).toShort else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next() & 0xff) << 0 | (next() & 0xff) << 8).toShort + 
((next() & 0xFF) << 0 | (next() & 0xFF) << 8).toShort else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -538,15 +538,15 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getInt(implicit byteOrder: ByteOrder): Int = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next() & 0xff) << 24 - | (next() & 0xff) << 16 - | (next() & 0xff) << 8 - | (next() & 0xff) << 0) + ((next() & 0xFF) << 24 + | (next() & 0xFF) << 16 + | (next() & 0xFF) << 8 + | (next() & 0xFF) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next() & 0xff) << 0 - | (next() & 0xff) << 8 - | (next() & 0xff) << 16 - | (next() & 0xff) << 24) + ((next() & 0xFF) << 0 + | (next() & 0xFF) << 8 + | (next() & 0xFF) << 16 + | (next() & 0xFF) << 24) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -555,23 +555,23 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getLong(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next().toLong & 0xff) << 56 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 0) + ((next().toLong & 0xFF) << 56 + | (next().toLong & 0xFF) << 48 + | (next().toLong & 0xFF) << 40 + | (next().toLong & 0xFF) << 32 + | (next().toLong & 0xFF) << 24 + | (next().toLong & 0xFF) << 16 + | (next().toLong & 0xFF) << 8 + | (next().toLong & 0xFF) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next().toLong & 0xff) << 0 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 56) + ((next().toLong & 0xFF) << 0 + | (next().toLong & 0xFF) << 8 + | (next().toLong & 0xFF) << 16 + | (next().toLong & 0xFF) << 24 + | (next().toLong & 0xFF) << 32 + | (next().toLong & 0xFF) << 40 + | (next().toLong & 0xFF) << 48 + | (next().toLong & 0xFF) << 56) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -582,11 +582,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLongPart(n: Int)(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) { var x = 0L - (1 to n).foreach(_ => x = (x << 8) | (next() & 0xff)) + (1 to n).foreach(_ => x = (x << 8) | (next() & 0xFF)) x } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { var x = 0L - (0 until n).foreach(i => x |= (next() & 0xff) << 8 * i) + (0 until n).foreach(i => x |= (next() & 0xFF) << 8 * i) x } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } diff --git a/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala b/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala index e009f3153b..182fa91e06 100644 --- a/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala @@ -78,7 +78,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. 
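The `0xff` to `0xFF` changes in the ByteIterator hunks are purely cosmetic; the masking implements ordinary big- and little-endian reads, e.g.:

{{{
import java.nio.ByteOrder

import akka.util.ByteString

object ByteOrderExample {
  val bs = ByteString(0x12, 0x34, 0x56, 0x78)
  // each call to `iterator` starts from the beginning of the ByteString
  val big = bs.iterator.getInt(ByteOrder.BIG_ENDIAN) // 0x12345678
  val little = bs.iterator.getInt(ByteOrder.LITTLE_ENDIAN) // 0x78563412
}
}}}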
- * */ def fromArrayUnsafe(array: Array[Byte]): ByteString = ByteString1C(array) @@ -103,7 +102,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. - * */ def fromArrayUnsafe(array: Array[Byte], offset: Int, length: Int): ByteString = ByteString1(array, offset, length) @@ -313,7 +311,7 @@ object ByteString { os.write(bytes, startIndex, length) } - def isCompact: Boolean = (length == bytes.length) + def isCompact: Boolean = length == bytes.length private[akka] def byteStringCompanion = ByteString1 diff --git a/akka-actor/src/main/scala-3/akka/util/ByteIterator.scala b/akka-actor/src/main/scala-3/akka/util/ByteIterator.scala index d7d7b29205..6345477f91 100644 --- a/akka-actor/src/main/scala-3/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala-3/akka/util/ByteIterator.scala @@ -71,12 +71,12 @@ object ByteIterator { final override def clone: ByteArrayIterator = new ByteArrayIterator(array, from, until) final override def take(n: Int): this.type = { - if (n < len) until = { if (n > 0) (from + n) else from } + if (n < len) until = { if (n > 0) from + n else from } this } final override def drop(n: Int): this.type = { - if (n > 0) from = { if (n < len) (from + n) else until } + if (n > 0) from = { if (n < len) from + n else until } this } @@ -162,7 +162,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = iterator.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xFF else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { if ((off < 0) || (len < 0) || (off + len > b.length)) throw new IndexOutOfBoundsException @@ -382,7 +382,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = current.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xFF else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { val nRead = current.asInputStream.read(b, off, len) @@ -523,9 +523,9 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getShort(implicit byteOrder: ByteOrder): Short = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next() & 0xff) << 8 | (next() & 0xff) << 0).toShort + ((next() & 0xFF) << 8 | (next() & 0xFF) << 0).toShort else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next() & 0xff) << 0 | (next() & 0xff) << 8).toShort + ((next() & 0xFF) << 0 | (next() & 0xFF) << 8).toShort else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -534,15 +534,15 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getInt(implicit byteOrder: ByteOrder): Int = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next() & 0xff) << 24 - | (next() & 0xff) << 16 - | (next() & 0xff) << 8 - | (next() & 0xff) << 0) + ((next() & 0xFF) << 24 + | (next() & 0xFF) << 16 + | (next() & 0xFF) << 8 + | (next() & 0xFF) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next() & 0xff) << 0 - | (next() & 0xff) << 8 - | (next() & 0xff) << 16 - | (next() & 0xff) << 24) + ((next() & 0xFF) << 0 + | (next() & 0xFF) << 8 + | (next() & 0xFF) << 16 + | (next() & 0xFF) << 24) else throw new IllegalArgumentException("Unknown byte order " + 
byteOrder) } @@ -551,23 +551,23 @@ abstract class ByteIterator extends BufferedIterator[Byte] { */ def getLong(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) - ((next().toLong & 0xff) << 56 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 0) + ((next().toLong & 0xFF) << 56 + | (next().toLong & 0xFF) << 48 + | (next().toLong & 0xFF) << 40 + | (next().toLong & 0xFF) << 32 + | (next().toLong & 0xFF) << 24 + | (next().toLong & 0xFF) << 16 + | (next().toLong & 0xFF) << 8 + | (next().toLong & 0xFF) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) - ((next().toLong & 0xff) << 0 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 56) + ((next().toLong & 0xFF) << 0 + | (next().toLong & 0xFF) << 8 + | (next().toLong & 0xFF) << 16 + | (next().toLong & 0xFF) << 24 + | (next().toLong & 0xFF) << 32 + | (next().toLong & 0xFF) << 40 + | (next().toLong & 0xFF) << 48 + | (next().toLong & 0xFF) << 56) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -578,11 +578,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLongPart(n: Int)(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) { var x = 0L - (1 to n).foreach(_ => x = (x << 8) | (next() & 0xff)) + (1 to n).foreach(_ => x = (x << 8) | (next() & 0xFF)) x } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { var x = 0L - (0 until n).foreach(i => x |= (next() & 0xff) << 8 * i) + (0 until n).foreach(i => x |= (next() & 0xFF) << 8 * i) x } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } diff --git a/akka-actor/src/main/scala-3/akka/util/ByteString.scala b/akka-actor/src/main/scala-3/akka/util/ByteString.scala index a5744dd5e8..4ae11d7ec6 100644 --- a/akka-actor/src/main/scala-3/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala-3/akka/util/ByteString.scala @@ -80,7 +80,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. - * */ def fromArrayUnsafe(array: Array[Byte]): ByteString = ByteString1C(array) @@ -105,7 +104,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. 
- * */ def fromArrayUnsafe(array: Array[Byte], offset: Int, length: Int): ByteString = ByteString1(array, offset, length) @@ -314,7 +312,7 @@ object ByteString { os.write(bytes, startIndex, length) } - def isCompact: Boolean = (length == bytes.length) + def isCompact: Boolean = length == bytes.length private[akka] def byteStringCompanion = ByteString1 diff --git a/akka-actor/src/main/scala/akka/AkkaVersion.scala b/akka-actor/src/main/scala/akka/AkkaVersion.scala index 2aabdfef74..461b94d8ea 100644 --- a/akka-actor/src/main/scala/akka/AkkaVersion.scala +++ b/akka-actor/src/main/scala/akka/AkkaVersion.scala @@ -40,8 +40,8 @@ object AkkaVersion { if (mOrRc ne null) currentPatchStr.toInt - 1 else currentPatchStr.toInt if (requiredMajorStr.toInt != currentMajorStr.toInt || - requiredMinorStr.toInt > currentMinorStr.toInt || - (requiredMinorStr == currentMinorStr && requiredPatchStr.toInt > currentPatch)) + requiredMinorStr.toInt > currentMinorStr.toInt || + (requiredMinorStr == currentMinorStr && requiredPatchStr.toInt > currentPatch)) throw new UnsupportedAkkaVersion( s"Current version of Akka is [$currentVersion], but $libraryName requires version [$requiredVersion]") case _ => // SNAPSHOT or unknown - you're on your own diff --git a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala index f1bcb74862..0855a63bfd 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala @@ -215,7 +215,6 @@ object AbstractActor { * } * } * - * */ abstract class AbstractActor extends Actor { @@ -352,7 +351,6 @@ abstract class UntypedAbstractActor extends AbstractActor { * Java API: compatible with lambda expressions * * Actor base class that mixes in logging into the Actor. - * */ abstract class AbstractLoggingActor extends AbstractActor with ActorLogging @@ -398,7 +396,6 @@ abstract class AbstractLoggingActor extends AbstractActor with ActorLogging * For a `Stash` based actor that enforces unbounded deques see [[akka.actor.AbstractActorWithUnboundedStash]]. * There is also an unrestricted version [[akka.actor.AbstractActorWithUnrestrictedStash]] that does not * enforce the mailbox type. - * */ abstract class AbstractActorWithStash extends AbstractActor with Stash @@ -408,7 +405,6 @@ abstract class AbstractActorWithStash extends AbstractActor with Stash * Actor base class with `Stash` that enforces an unbounded deque for the actor. The proper mailbox has to be configured * manually, and the mailbox should extend the [[akka.dispatch.DequeBasedMessageQueueSemantics]] marker trait. * See [[akka.actor.AbstractActorWithStash]] for details on how `Stash` works. - * */ abstract class AbstractActorWithUnboundedStash extends AbstractActor with UnboundedStash @@ -417,6 +413,5 @@ abstract class AbstractActorWithUnboundedStash extends AbstractActor with Unboun * * Actor base class with `Stash` that does not enforce any mailbox type. The mailbox of the actor has to be configured * manually. See [[akka.actor.AbstractActorWithStash]] for details on how `Stash` works. 
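The various `Stash` base classes documented above differ only in the mailbox they require; the usage pattern itself is unchanged. A classic-actor sketch (the `Gated` actor and its string protocol are illustrative):

{{{
import akka.actor.{ Actor, Stash }

class Gated extends Actor with Stash {
  def receive: Receive = {
    case "open" =>
      unstashAll() // replay everything buffered so far, in original order
      context.become {
        case msg => println(s"open, handling $msg")
      }
    case _ =>
      stash() // buffer until opened; requires a deque-based mailbox
  }
}
}}}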
- * */ abstract class AbstractActorWithUnrestrictedStash extends AbstractActor with UnrestrictedStash diff --git a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala index 6b560e6269..48c667a4d2 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala @@ -10,7 +10,6 @@ import akka.util.JavaDurationConverters._ /** * Java API: compatible with lambda expressions - * */ object AbstractFSM { @@ -29,7 +28,6 @@ object AbstractFSM { * Java API: compatible with lambda expressions * * Finite State Machine actor abstract base class. - * */ abstract class AbstractFSM[S, D] extends FSM[S, D] { import java.util.{ List => JList } @@ -553,7 +551,6 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * Java API: compatible with lambda expressions * * Finite State Machine actor abstract base class. - * */ abstract class AbstractLoggingFSM[S, D] extends AbstractFSM[S, D] with LoggingFSM[S, D] @@ -561,6 +558,5 @@ abstract class AbstractLoggingFSM[S, D] extends AbstractFSM[S, D] with LoggingFS * Java API: compatible with lambda expressions * * Finite State Machine actor abstract base class with Stash support. - * */ abstract class AbstractFSMWithStash[S, D] extends AbstractFSM[S, D] with Stash diff --git a/akka-actor/src/main/scala/akka/actor/AbstractProps.scala b/akka-actor/src/main/scala/akka/actor/AbstractProps.scala index 04a84d1783..278495223f 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractProps.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractProps.scala @@ -14,7 +14,6 @@ import akka.japi.Creator import akka.util.Reflect /** - * * Java API: Factory for Props instances. */ private[akka] trait AbstractProps { @@ -26,7 +25,7 @@ private[akka] trait AbstractProps { if (Modifier.isAbstract(clazz.getModifiers)) { throw new IllegalArgumentException(s"Actor class [${clazz.getName}] must not be abstract") } else if (!classOf[Actor].isAssignableFrom(clazz) && - !classOf[IndirectActorProducer].isAssignableFrom(clazz)) { + !classOf[IndirectActorProducer].isAssignableFrom(clazz)) { throw new IllegalArgumentException( s"Actor class [${clazz.getName}] must be subClass of akka.actor.Actor or akka.actor.IndirectActorProducer.") } diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 7f5ebb3215..49d5e5d809 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -376,10 +376,10 @@ object Actor { /** * Type alias representing a Receive-expression for Akka Actors. */ - //#receive + // #receive type Receive = PartialFunction[Any, Unit] - //#receive + // #receive /** * emptyBehavior is a Receive-expression that matches no messages at all, ever. @@ -503,7 +503,7 @@ trait Actor { * self ! message * */ - implicit final val self: ActorRef = context.self //MUST BE A VAL, TRUST ME + implicit final val self: ActorRef = context.self // MUST BE A VAL, TRUST ME /** * The reference sender Actor of the last received message. @@ -519,9 +519,9 @@ trait Actor { * Scala API: This defines the initial actor behavior, it must return a partial function * with the actor logic. */ - //#receive + // #receive def receive: Actor.Receive - //#receive + // #receive /** * INTERNAL API. @@ -585,10 +585,10 @@ trait Actor { * Empty default implementation. 
*/ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def preStart(): Unit = () - //#lifecycle-hooks + // #lifecycle-hooks /** * User overridable callback. @@ -597,10 +597,10 @@ trait Actor { * Empty default implementation. */ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def postStop(): Unit = () - //#lifecycle-hooks + // #lifecycle-hooks /** * Scala API: User overridable callback: '''By default it disposes of all children and then calls `postStop()`.''' @@ -611,7 +611,7 @@ trait Actor { * up of resources before Actor is terminated. */ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def preRestart(@unused reason: Throwable, @unused message: Option[Any]): Unit = { context.children.foreach { child => context.unwatch(child) @@ -620,7 +620,7 @@ trait Actor { postStop() } - //#lifecycle-hooks + // #lifecycle-hooks /** * User overridable callback: By default it calls `preStart()`. @@ -629,11 +629,11 @@ trait Actor { * Is called right AFTER restart on the newly created Actor to allow reinitialization after an Actor crash. */ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def postRestart(@unused reason: Throwable): Unit = { preStart() } - //#lifecycle-hooks + // #lifecycle-hooks /** * User overridable callback. diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index eabd5e1e9f..de0ca9c047 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -467,7 +467,7 @@ private[akka] class ActorCell( /* * MESSAGE PROCESSING */ - //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status + // Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status final def systemInvoke(message: SystemMessage): Unit = { /* * When recreate/suspend/resume are received while restarting (i.e. 
between @@ -521,8 +521,8 @@ private[akka] class ActorCell( case NoMessage => // only here to suppress warning } } catch handleNonFatalOrInterruptedException { e => - handleInvokeFailure(Nil, e) - } + handleInvokeFailure(Nil, e) + } val newState = calculateState // As each state accepts a strict subset of another state, it is enough to unstash if we "walk up" the state // chain @@ -535,7 +535,7 @@ private[akka] class ActorCell( invokeAll(new EarliestFirstSystemMessageList(message), calculateState) } - //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status + // Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status final def invoke(messageHandle: Envelope): Unit = { val msg = messageHandle.message val timeoutBeforeReceive = cancelReceiveTimeoutIfNeeded(msg) @@ -548,10 +548,11 @@ private[akka] class ActorCell( } currentMessage = null // reset current message after successful invocation } catch handleNonFatalOrInterruptedException { e => - handleInvokeFailure(Nil, e) - } finally - // Schedule or reschedule receive timeout - checkReceiveTimeoutIfNeeded(msg, timeoutBeforeReceive) + handleInvokeFailure(Nil, e) + } + finally + // Schedule or reschedule receive timeout + checkReceiveTimeoutIfNeeded(msg, timeoutBeforeReceive) } def autoReceiveMessage(msg: Envelope): Unit = { @@ -607,7 +608,7 @@ private[akka] class ActorCell( * ACTOR INSTANCE HANDLING */ - //This method is in charge of setting up the contextStack and create a new instance of the Actor + // This method is in charge of setting up the contextStack and create a new instance of the Actor protected def newActor(): Actor = { contextStack.set(this :: contextStack.get) try { diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index cb9b488ce2..49a2b5ebea 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -89,17 +89,18 @@ object ActorPath { // If the number of cases increase remember to add a `@switch` annotation e.g.: // (findInvalidPathElementCharPosition(element): @switch) match { - (findInvalidPathElementCharPosition(element)) match { + findInvalidPathElementCharPosition(element) match { case ValidPathCode => // valid case EmptyPathCode => throw InvalidActorNameException(s"Actor path element must not be empty $fullPathMsg") case invalidAt => - throw InvalidActorNameException(s"""Invalid actor path element [$element]$fullPathMsg, illegal character [${element( - invalidAt)}] at position: $invalidAt. """ + - """Actor paths MUST: """ + - """not start with `$`, """ + - s"""include only ASCII letters and can only contain these special characters: ${ActorPath.ValidSymbols}.""") + throw InvalidActorNameException( + s"""Invalid actor path element [$element]$fullPathMsg, illegal character [${element( + invalidAt)}] at position: $invalidAt. 
""" + + """Actor paths MUST: """ + + """not start with `$`, """ + + s"""include only ASCII letters and can only contain these special characters: ${ActorPath.ValidSymbols}.""") } } @@ -130,7 +131,8 @@ object ActorPath { case '%' if pos + 2 < len && isHexChar(s.charAt(pos + 1)) && isHexChar(s.charAt(pos + 2)) => validate(pos + 3) case _ => pos - } else ValidPathCode + } + else ValidPathCode if (len > 0 && s.charAt(0) != '$') validate(0) else 0 } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index ad479f40f4..6e7b49526d 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -643,7 +643,7 @@ trait WrappedMessage { private[akka] object DeadLetterActorRef { @SerialVersionUID(1L) - class SerializedDeadLetterActorRef extends Serializable { //TODO implement as Protobuf for performance? + class SerializedDeadLetterActorRef extends Serializable { // TODO implement as Protobuf for performance? @throws(classOf[java.io.ObjectStreamException]) private def readResolve(): AnyRef = JavaSerializer.currentSystem.value.deadLetters } @@ -895,7 +895,7 @@ private[akka] class VirtualPathContainer( case u: Unwatch => remWatcher(u.watchee, u.watcher) case DeathWatchNotification(actorRef, _, _) => this.!(Terminated(actorRef)(existenceConfirmed = true, addressTerminated = false))(actorRef) - case _ => //ignore all other messages + case _ => // ignore all other messages } } @@ -912,7 +912,7 @@ private[akka] class VirtualPathContainer( @InternalApi override private[akka] def isTerminated: Boolean = _watchedBy.isEmpty - //noinspection EmptyCheck + // noinspection EmptyCheck protected def sendTerminated(): Unit = { def unwatchWatched(watched: ActorRef): Unit = watched.asInstanceOf[InternalActorRef].sendSystemMessage(Unwatch(watched, this)) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 46b047a90a..1b4141ab75 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -446,7 +446,7 @@ private[akka] class LocalActorRefProvider private[akka] ( override def stop(): Unit = { causeOfTermination.trySuccess( - Terminated(provider.rootGuardian)(existenceConfirmed = true, addressTerminated = true)) //Idempotent + Terminated(provider.rootGuardian)(existenceConfirmed = true, addressTerminated = true)) // Idempotent terminationPromise.completeWith(causeOfTermination.future) // Signal termination downstream, idempotent } diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index 91c6dabfa0..221fc4a3d4 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -96,7 +96,6 @@ abstract class ActorSelection extends Serializable { * if such an actor exists. It is completed with failure [[ActorNotFound]] if * no such actor exists or the identification didn't complete within the * supplied `timeout`. - * */ @deprecated("Use the overloaded method resolveOne which accepts java.time.Duration instead.", since = "2.5.20") def resolveOneCS(timeout: FiniteDuration): CompletionStage[ActorRef] = @@ -110,7 +109,6 @@ abstract class ActorSelection extends Serializable { * if such an actor exists. 
It is completed with failure [[ActorNotFound]] if * no such actor exists or the identification didn't complete within the * supplied `timeout`. - * */ @deprecated("Use the overloaded method resolveOne which accepts java.time.Duration instead.", since = "2.5.20") def resolveOneCS(timeout: java.time.Duration): CompletionStage[ActorRef] = resolveOne(timeout) @@ -123,7 +121,6 @@ abstract class ActorSelection extends Serializable { * if such an actor exists. It is completed with failure [[ActorNotFound]] if * no such actor exists or the identification didn't complete within the * supplied `timeout`. - * */ def resolveOne(timeout: java.time.Duration): CompletionStage[ActorRef] = { import JavaDurationConverters._ @@ -189,7 +186,7 @@ abstract class ActorSelection extends Serializable { * allowing for broadcasting of messages to that section. */ object ActorSelection { - //This cast is safe because the self-type of ActorSelection requires that it mixes in ScalaActorSelection + // This cast is safe because the self-type of ActorSelection requires that it mixes in ScalaActorSelection implicit def toScala(sel: ActorSelection): ScalaActorSelection = sel.asInstanceOf[ScalaActorSelection] /** diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index d5734f3bf5..7ae33b4b08 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -338,7 +338,7 @@ object ActorSystem { val loggingFilterAlreadyConfigured = configuredLoggingFilter == slf4jLoggingFilterClassName || configuredLoggingFilter != classOf[ - DefaultLoggingFilter].getName + DefaultLoggingFilter].getName def newLoggingFilterConfStr = s"""$loggingFilterConfKey = "$slf4jLoggingFilterClassName"""" @@ -351,7 +351,7 @@ object ActorSystem { } else { val confKey = "akka.use-slf4j" if (config.hasPath(confKey) && config.getBoolean(confKey) && dynamicAccess.classIsOnClasspath( - slf4jLoggerClassName)) { + slf4jLoggerClassName)) { val newLoggers = slf4jLoggerClassName +: configuredLoggers.filterNot(_ == classOf[DefaultLogger].getName) val newLoggersConfStr = s"$loggersConfKey = [${newLoggers.mkString("\"", "\", \"", "\"")}]" val newConfStr = @@ -935,19 +935,20 @@ private[akka] class ActorSystemImpl( val scheduler: Scheduler = createScheduler() - val provider: ActorRefProvider = try { - val arguments = Vector( - classOf[String] -> name, - classOf[Settings] -> settings, - classOf[EventStream] -> eventStream, - classOf[DynamicAccess] -> dynamicAccess) + val provider: ActorRefProvider = + try { + val arguments = Vector( + classOf[String] -> name, + classOf[Settings] -> settings, + classOf[EventStream] -> eventStream, + classOf[DynamicAccess] -> dynamicAccess) - dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get - } catch { - case NonFatal(e) => - Try(stopScheduler()) - throw e - } + dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get + } catch { + case NonFatal(e) => + Try(stopScheduler()) + throw e + } def deadLetters: ActorRef = provider.deadLetters @@ -1026,28 +1027,29 @@ private[akka] class ActorSystemImpl( "The calling code expected that the ActorSystem was initialized but it wasn't yet. " + "This is probably a bug in the ActorSystem initialization sequence often related to initialization of extensions. 
" + "Please report at https://github.com/akka/akka/issues.") - private lazy val _start: this.type = try { + private lazy val _start: this.type = + try { - registerOnTermination(stopScheduler()) - // the provider is expected to start default loggers, LocalActorRefProvider does this - provider.init(this) - // at this point it should be initialized "enough" for most extensions that we might want to guard against otherwise - _initialized = true + registerOnTermination(stopScheduler()) + // the provider is expected to start default loggers, LocalActorRefProvider does this + provider.init(this) + // at this point it should be initialized "enough" for most extensions that we might want to guard against otherwise + _initialized = true - if (settings.LogDeadLetters > 0) - logDeadLetterListener = Some(systemActorOf(Props[DeadLetterListener](), "deadLetterListener")) - eventStream.startUnsubscriber() - ManifestInfo(this).checkSameVersion("Akka", allModules, logWarning = true) - if (!terminating) - loadExtensions() - if (LogConfigOnStart) logConfiguration() - this - } catch { - case NonFatal(e) => - try terminate() - catch { case NonFatal(_) => Try(stopScheduler()) } - throw e - } + if (settings.LogDeadLetters > 0) + logDeadLetterListener = Some(systemActorOf(Props[DeadLetterListener](), "deadLetterListener")) + eventStream.startUnsubscriber() + ManifestInfo(this).checkSameVersion("Akka", allModules, logWarning = true) + if (!terminating) + loadExtensions() + if (LogConfigOnStart) logConfiguration() + this + } catch { + case NonFatal(e) => + try terminate() + catch { case NonFatal(_) => Try(stopScheduler()) } + throw e + } def start(): this.type = _start def registerOnTermination[T](code: => T): Unit = { registerOnTermination(new Runnable { def run() = code }) } @@ -1096,7 +1098,7 @@ private[akka] class ActorSystemImpl( terminate() } - //#create-scheduler + // #create-scheduler /** * Create the scheduler service. This one needs one special behavior: if * Closeable, it MUST execute all outstanding tasks upon .close() in order @@ -1115,7 +1117,7 @@ private[akka] class ActorSystemImpl( classOf[LoggingAdapter] -> log, classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler"))) .get - //#create-scheduler + // #create-scheduler /* * This is called after the last actor has signaled its termination, i.e. @@ -1148,16 +1150,16 @@ private[akka] class ActorSystemImpl( "A serializer must not access the SerializationExtension from its constructor. Use lazy init." 
else "Could be deadlock due to cyclic initialization of extensions.")) } - findExtension(ext) //Registration in process, await completion and retry - case t: Throwable => throw t //Initialization failed, throw same again + findExtension(ext) // Registration in process, await completion and retry + case t: Throwable => throw t // Initialization failed, throw same again case other => - other.asInstanceOf[T] //could be a T or null, in which case we return the null as T + other.asInstanceOf[T] // could be a T or null, in which case we return the null as T } @tailrec final def registerExtension[T <: Extension](ext: ExtensionId[T]): T = { findExtension(ext) match { - case null => //Doesn't already exist, commence registration + case null => // Doesn't already exist, commence registration val inProcessOfRegistration = new CountDownLatch(1) extensions.putIfAbsent(ext, inProcessOfRegistration) match { // Signal that registration is in process case null => @@ -1166,18 +1168,18 @@ private[akka] class ActorSystemImpl( case null => throw new IllegalStateException(s"Extension instance created as 'null' for extension [$ext]") case instance => - extensions.replace(ext, inProcessOfRegistration, instance) //Replace our in process signal with the initialized extension - instance //Profit! + extensions.replace(ext, inProcessOfRegistration, instance) // Replace our in process signal with the initialized extension + instance // Profit! } } catch { case t: Throwable => - extensions.replace(ext, inProcessOfRegistration, t) //In case shit hits the fan, remove the inProcess signal - throw t //Escalate to caller + extensions.replace(ext, inProcessOfRegistration, t) // In case shit hits the fan, remove the inProcess signal + throw t // Escalate to caller } finally { - inProcessOfRegistration.countDown() //Always notify listeners of the inProcess signal + inProcessOfRegistration.countDown() // Always notify listeners of the inProcess signal } case _ => - registerExtension(ext) //Someone else is in process of registering an extension for this Extension, retry + registerExtension(ext) // Someone else is in process of registering an extension for this Extension, retry } case existing => existing.asInstanceOf[T] } diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index c9d9f95f0d..f01f98351a 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -89,7 +89,8 @@ final case class Address private[akka] (protocol: String, system: String, host: */ def hostPort: String = toString.substring(protocol.length + 3) - /** INTERNAL API + /** + * INTERNAL API * Check if the address is not created through `AddressFromURIString`, if there * are any unusual characters in the host string. */ diff --git a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala index 09ef75ab28..81b32d53b1 100644 --- a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala +++ b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala @@ -735,25 +735,26 @@ final class CoordinatedShutdown private[akka] ( val result = phaseDef.run(recoverEnabled) val timeout = phases(phaseName).timeout val deadline = Deadline.now + timeout - val timeoutFut = try { - after(timeout, system.scheduler) { - if (phaseName == CoordinatedShutdown.PhaseActorSystemTerminate && deadline.hasTimeLeft()) { - // too early, i.e. 
triggered by system termination + val timeoutFut = + try { + after(timeout, system.scheduler) { + if (phaseName == CoordinatedShutdown.PhaseActorSystemTerminate && deadline.hasTimeLeft()) { + // too early, i.e. triggered by system termination + result + } else if (result.isCompleted) + Future.successful(Done) + else if (recoverEnabled) { + log.warning("Coordinated shutdown phase [{}] timed out after {}", phaseName, timeout) + Future.successful(Done) + } else + Future.failed( + new TimeoutException(s"Coordinated shutdown phase [$phaseName] timed out after $timeout")) + } + } catch { + case _: IllegalStateException => + // The call to `after` threw IllegalStateException, triggered by system termination result - } else if (result.isCompleted) - Future.successful(Done) - else if (recoverEnabled) { - log.warning("Coordinated shutdown phase [{}] timed out after {}", phaseName, timeout) - Future.successful(Done) - } else - Future.failed( - new TimeoutException(s"Coordinated shutdown phase [$phaseName] timed out after $timeout")) } - } catch { - case _: IllegalStateException => - // The call to `after` threw IllegalStateException, triggered by system termination - result - } Future.firstCompletedOf(List(result, timeoutFut)) } if (remaining.isEmpty) diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 9f07e46ea4..0149bf3fc1 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -227,7 +227,7 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce .unwrapped .asScala .collect { - case (key, value: String) => (key -> value) + case (key, value: String) => key -> value } .toMap diff --git a/akka-actor/src/main/scala/akka/actor/Extension.scala b/akka-actor/src/main/scala/akka/actor/Extension.scala index 93fa50429f..5b03769d71 100644 --- a/akka-actor/src/main/scala/akka/actor/Extension.scala +++ b/akka-actor/src/main/scala/akka/actor/Extension.scala @@ -91,7 +91,6 @@ trait ExtensionId[T <: Extension] { * {{{ * override def get(system: ActorSystem): TheExtension = super.get(system) * }}} - * */ def get(system: ActorSystem): T = apply(system) @@ -103,7 +102,6 @@ trait ExtensionId[T <: Extension] { * {{{ * override def get(system: ClassicActorSystemProvider): TheExtension = super.get(system) * }}} - * */ def get(system: ClassicActorSystemProvider): T = apply(system) diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 7670219655..4a2f2a3047 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -43,7 +43,7 @@ final case class ChildRestartStats( def uid: Int = child.path.uid - //FIXME How about making ChildRestartStats immutable and then move these methods into the actual supervisor strategies? + // FIXME How about making ChildRestartStats immutable and then move these methods into the actual supervisor strategies? 
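// [editor's note] A minimal, self-contained sketch of the window-based restart
// accounting that `requestRestartPermission` below implements. All names here are
// hypothetical; this is an illustration under assumed semantics, not the Akka
// implementation:
object RestartWindowSketch {
  final case class Stats(restartCount: Int = 0, windowStartNanos: Long = 0L)

  // Grant a restart while fewer than maxNrOfRetries restarts fell inside the window.
  def requestPermission(stats: Stats, maxNrOfRetries: Int, windowMillis: Long): (Boolean, Stats) = {
    val now = System.nanoTime()
    val elapsedMillis = (now - stats.windowStartNanos) / 1000000L
    if (stats.restartCount == 0 || elapsedMillis > windowMillis)
      (true, Stats(restartCount = 1, windowStartNanos = now)) // start a fresh window
    else if (stats.restartCount < maxNrOfRetries)
      (true, stats.copy(restartCount = stats.restartCount + 1)) // still within budget
    else
      (false, stats) // budget exhausted; the strategy should stop rather than restart
  }
}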
def requestRestartPermission(retriesWindow: (Option[Int], Option[Int])): Boolean = retriesWindow match { case (Some(retries), _) if retries < 1 => false @@ -280,12 +280,12 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { */ private[akka] def sort(in: Iterable[CauseDirective]): immutable.Seq[CauseDirective] = in.foldLeft(new ArrayBuffer[CauseDirective](in.size)) { (buf, ca) => - buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { - case -1 => buf.append(ca) - case x => buf.insert(x, ca) - } - buf + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) } + buf + } .to(immutable.IndexedSeq) private[akka] def withinTimeRangeOption(withinTimeRange: Duration): Option[Duration] = @@ -548,7 +548,7 @@ case class AllForOneStrategy( children: Iterable[ChildRestartStats]): Unit = { if (children.nonEmpty) { if (restart && children.forall(_.requestRestartPermission(retriesWindow))) - children.foreach(crs => restartChild(crs.child, cause, suspendFirst = (crs.child != child))) + children.foreach(crs => restartChild(crs.child, cause, suspendFirst = crs.child != child)) else for (c <- children) context.stop(c.child) } @@ -661,6 +661,6 @@ case class OneForOneStrategy( if (restart && stats.requestRestartPermission(retriesWindow)) restartChild(child, cause, suspendFirst = false) else - context.stop(child) //TODO optimization to drop child here already? + context.stop(child) // TODO optimization to drop child here already? } } diff --git a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala index 5b2894a736..c03214a82e 100644 --- a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala @@ -103,46 +103,47 @@ class LightArrayRevolverScheduler(config: Config, log: LoggingAdapter, threadFac implicit executor: ExecutionContext): Cancellable = { checkMaxDelay(roundUp(delay).toNanos) try new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self => - compareAndSet( - InitialRepeatMarker, - schedule( - executor, - new AtomicLong(clock() + initialDelay.toNanos) with Runnable { - override def run(): Unit = { - try { - runnable.run() - val driftNanos = clock() - getAndAdd(delay.toNanos) - if (self.get != null) - swap(schedule(executor, this, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)))) - } catch { - case _: SchedulerException => // ignore failure to enqueue or terminated target actor + compareAndSet( + InitialRepeatMarker, + schedule( + executor, + new AtomicLong(clock() + initialDelay.toNanos) with Runnable { + override def run(): Unit = { + try { + runnable.run() + val driftNanos = clock() - getAndAdd(delay.toNanos) + if (self.get != null) + swap(schedule(executor, this, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)))) + } catch { + case _: SchedulerException => // ignore failure to enqueue or terminated target actor + } } - } - }, - roundUp(initialDelay))) + }, + roundUp(initialDelay))) - @tailrec private def swap(c: Cancellable): Unit = { - get match { - case null => if (c != null) c.cancel() - case old => if (!compareAndSet(old, c)) swap(c) - } - } - - final def cancel(): Boolean = { - @tailrec def tailrecCancel(): Boolean = { + @tailrec private def swap(c: Cancellable): Unit = { get match { - case null => false - case c => - if (c.cancel()) compareAndSet(c, null) - else compareAndSet(c, null) || 
tailrecCancel() + case null => if (c != null) c.cancel() + case old => if (!compareAndSet(old, c)) swap(c) } } - tailrecCancel() - } + final def cancel(): Boolean = { + @tailrec def tailrecCancel(): Boolean = { + get match { + case null => false + case c => + if (c.cancel()) compareAndSet(c, null) + else compareAndSet(c, null) || tailrecCancel() + } + } - override def isCancelled: Boolean = get == null - } catch { + tailrecCancel() + } + + override def isCancelled: Boolean = get == null + } + catch { case cause @ SchedulerException(msg) => throw new IllegalStateException(msg, cause) } } diff --git a/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala index 96ab517f6a..27cf955bf3 100644 --- a/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala @@ -25,11 +25,11 @@ import akka.annotation.DoNotInherit class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAccess { override def getClassFor[T: ClassTag](fqcn: String): Try[Class[_ <: T]] = - Try[Class[_ <: T]]({ + Try[Class[_ <: T]] { val c = Class.forName(fqcn, false, classLoader).asInstanceOf[Class[_ <: T]] val t = implicitly[ClassTag[T]].runtimeClass if (t.isAssignableFrom(c)) c else throw new ClassCastException(t.toString + " is not assignable from " + c) - }) + } override def createInstanceFor[T: ClassTag](clazz: Class[_], args: immutable.Seq[(Class[_], AnyRef)]): Try[T] = Try { diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 8a1f35ff5e..f65abcfc77 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -73,46 +73,47 @@ trait Scheduler { def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)( implicit executor: ExecutionContext): Cancellable = { try new AtomicReference[Cancellable](Cancellable.initialNotCancelled) with Cancellable { self => - compareAndSet( - Cancellable.initialNotCancelled, - scheduleOnce( - initialDelay, - new Runnable { - override def run(): Unit = { - try { - runnable.run() - if (self.get != null) - swap(scheduleOnce(delay, this)) - } catch { - // ignore failure to enqueue or terminated target actor - case _: SchedulerException => - case e: IllegalStateException if e.getCause != null && e.getCause.isInstanceOf[SchedulerException] => + compareAndSet( + Cancellable.initialNotCancelled, + scheduleOnce( + initialDelay, + new Runnable { + override def run(): Unit = { + try { + runnable.run() + if (self.get != null) + swap(scheduleOnce(delay, this)) + } catch { + // ignore failure to enqueue or terminated target actor + case _: SchedulerException => + case e: IllegalStateException if e.getCause != null && e.getCause.isInstanceOf[SchedulerException] => + } } - } - })) + })) - @tailrec private def swap(c: Cancellable): Unit = { - get match { - case null => if (c != null) c.cancel() - case old => if (!compareAndSet(old, c)) swap(c) - } - } - - final def cancel(): Boolean = { - @tailrec def tailrecCancel(): Boolean = { + @tailrec private def swap(c: Cancellable): Unit = { get match { - case null => false - case c => - if (c.cancel()) compareAndSet(c, null) - else compareAndSet(c, null) || tailrecCancel() + case null => if (c != null) c.cancel() + case old => if (!compareAndSet(old, c)) swap(c) } } - tailrecCancel() - } + final def cancel(): Boolean = 
{ + @tailrec def tailrecCancel(): Boolean = { + get match { + case null => false + case c => + if (c.cancel()) compareAndSet(c, null) + else compareAndSet(c, null) || tailrecCancel() + } + } - override def isCancelled: Boolean = get == null - } catch { + tailrecCancel() + } + + override def isCancelled: Boolean = get == null + } + catch { case SchedulerException(msg) => throw new IllegalStateException(msg) } } @@ -441,9 +442,10 @@ trait Scheduler { implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = - scheduleOnce(delay, new Runnable { - override def run(): Unit = receiver ! message - }) + scheduleOnce(delay, + new Runnable { + override def run(): Unit = receiver ! message + }) /** * Java API: Schedules a message to be sent once with a delay, i.e. a time period that has diff --git a/akka-actor/src/main/scala/akka/actor/Stash.scala b/akka-actor/src/main/scala/akka/actor/Stash.scala index 19c10545a2..db64c8da9a 100644 --- a/akka-actor/src/main/scala/akka/actor/Stash.scala +++ b/akka-actor/src/main/scala/akka/actor/Stash.scala @@ -198,11 +198,12 @@ private[akka] trait StashSupport { * if the `unstash()` call successfully returns or throws an exception. */ private[akka] def unstash(): Unit = - if (theStash.nonEmpty) try { - enqueueFirst(theStash.head) - } finally { - theStash = theStash.tail - } + if (theStash.nonEmpty) + try { + enqueueFirst(theStash.head) + } finally { + theStash = theStash.tail + } /** * Prepends all messages in the stash to the mailbox, and then clears the stash. diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 0e206b2acd..b68121d0e4 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -75,9 +75,9 @@ trait TypedActorFactory { * Creates a new TypedActor with the specified properties */ def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T]): R = { - val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver - val c = props.creator //Cache this to avoid closing over the Props - val i = props.interfaces //Cache this to avoid closing over the Props + val proxyVar = new AtomVar[R] // Chicken'n'egg-resolver + val c = props.creator // Cache this to avoid closing over the Props + val i = props.interfaces // Cache this to avoid closing over the Props val ap = Props(new TypedActor.TypedActor[R, T](proxyVar, c(), i)).withDeploy(props.actorProps().deploy) typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap)) } @@ -86,9 +86,9 @@ trait TypedActorFactory { * Creates a new TypedActor with the specified properties */ def typedActorOf[R <: AnyRef, T <: R](props: TypedProps[T], name: String): R = { - val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver - val c = props.creator //Cache this to avoid closing over the Props - val i = props.interfaces //Cache this to avoid closing over the Props + val proxyVar = new AtomVar[R] // Chicken'n'egg-resolver + val c = props.creator // Cache this to avoid closing over the Props + val i = props.interfaces // Cache this to avoid closing over the Props val ap = Props(new akka.actor.TypedActor.TypedActor[R, T](proxyVar, c(), i)).withDeploy(props.actorProps().deploy) typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap, name)) } @@ -167,7 +167,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi val p = ps(i) val s = serialization.findSerializerFor(p) val m = Serializers.manifestFor(s, p) - 
serializedParameters(i) = (s.identifier, m, s.toBinary(parameters(i))) //Mutable for the sake of sanity + serializedParameters(i) = (s.identifier, m, s.toBinary(parameters(i))) // Mutable for the sake of sanity } SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, serializedParameters) @@ -185,8 +185,8 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, String, Array[Byte])]) { - //TODO implement writeObject and readObject to serialize - //TODO Possible optimization is to special encode the parameter-types to conserve space + // TODO implement writeObject and readObject to serialize + // TODO Possible optimization is to special encode the parameter-types to conserve space @throws(classOf[ObjectStreamException]) private def readResolve(): AnyRef = { val system = akka.serialization.JavaSerializer.currentSystem.value if (system eq null) @@ -200,7 +200,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi case null => null case a if a.length == 0 => Array[AnyRef]() case a => - val deserializedParameters: Array[AnyRef] = new Array[AnyRef](a.length) //Mutable for the sake of sanity + val deserializedParameters: Array[AnyRef] = new Array[AnyRef](a.length) // Mutable for the sake of sanity for (i <- 0 until a.length) { val (sId, manifest, bytes) = a(i) deserializedParameters(i) = serialization.deserialize(bytes, sId, manifest).get @@ -300,7 +300,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi akka.actor.TypedActor(self.context.system).invocationHandlerFor(proxyVar.get) match { case null => case some => - some.actorVar.set(self.context.system.deadLetters) //Point it to the DLQ + some.actorVar.set(self.context.system.deadLetters) // Point it to the DLQ proxyVar.set(null.asInstanceOf[R]) } } @@ -310,7 +310,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi case l: PreRestart => l.preRestart(reason, message) case _ => self.context.children - .foreach(self.context.stop) //Can't be super.preRestart(reason, message) since that would invoke postStop which would set the actorVar to DL and proxyVar to null + .foreach(self.context.stop) // Can't be super.preRestart(reason, message) since that would invoke postStop which would set the actorVar to DL and proxyVar to null } } @@ -457,14 +457,14 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi case "toString" => actor.toString case "equals" => (args.length == 1 && (proxy eq args(0)) || actor == extension.getActorRefFor(args(0))) - .asInstanceOf[AnyRef] //Force boxing of the boolean + .asInstanceOf[AnyRef] // Force boxing of the boolean case "hashCode" => actor.hashCode.asInstanceOf[AnyRef] case _ => implicit val dispatcher = extension.system.dispatcher import akka.pattern.ask MethodCall(method, args) match { case m if m.isOneWay => - actor ! m; null //Null return value + actor ! m; null // Null return value case m if m.returnsFuture => ask(actor, m)(timeout).map { case NullResponse => null @@ -649,8 +649,6 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] ( /** * Scala API: return a new TypedProps that will use the specified Timeout for its non-void-returning methods, * if None is specified, it will use the default timeout as specified in the configuration. 
- * - * */ def withTimeout(timeout: Option[Timeout]): TypedProps[T] = this.copy(timeout = timeout) @@ -691,7 +689,7 @@ final case class ContextualTypedActorFactory(typedActor: TypedActorExtension, ac @nowarn("msg=deprecated") class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFactory with Extension { - import TypedActor._ //Import the goodies from the companion object + import TypedActor._ // Import the goodies from the companion object protected def actorFactory: ActorRefFactory = system protected def typedActor = this @@ -725,13 +723,13 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac props: TypedProps[T], proxyVar: AtomVar[R], actorRef: => ActorRef): R = { - //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling + // Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling val actorVar = new AtomVar[ActorRef](null) val proxy = Proxy .newProxyInstance( props.loader .orElse(props.interfaces.collectFirst { case any => any.getClassLoader }) - .orNull, //If we have no loader, we arbitrarily take the loader of the first interface + .orNull, // If we have no loader, we arbitrarily take the loader of the first interface props.interfaces.toArray, new TypedActorInvocationHandler(this, actorVar, props.timeout.getOrElse(DefaultReturnTimeout))) .asInstanceOf[R] @@ -741,7 +739,7 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac proxy } else { proxyVar.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive - actorVar.set(actorRef) //Make sure the InvocationHandler gets a hold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet + actorVar.set(actorRef) // Make sure the InvocationHandler gets a hold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet proxyVar.get } } @@ -751,7 +749,7 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac */ private[akka] def invocationHandlerFor(typedActor: AnyRef): TypedActorInvocationHandler = if ((typedActor ne null) && classOf[Proxy].isAssignableFrom(typedActor.getClass) && Proxy.isProxyClass( - typedActor.getClass)) typedActor match { + typedActor.getClass)) typedActor match { case null => null case other => Proxy.getInvocationHandler(other) match { @@ -759,5 +757,6 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac case handler: TypedActorInvocationHandler => handler case _ => null } - } else null + } + else null } diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala index 284e8965d4..8815d3f92f 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala @@ -122,9 +122,9 @@ private[akka] trait Children { this: ActorCell => } if (actor match { - case r: RepointableRef => r.isStarted - case _ => true - }) shallDie(actor) + case r: RepointableRef => r.isStarted + case _ => true + }) shallDie(actor) } actor.asInstanceOf[InternalActorRef].stop() } @@ -272,17 +272,16 @@ private[akka] trait Children { this: ActorCell => if (oldInfo eq null) Serialization.currentTransportInformation.value = system.provider.serializationInformation - props.args.forall( - arg => - arg == null || - 
arg.isInstanceOf[NoSerializationVerificationNeeded] || - settings.NoSerializationVerificationNeededClassPrefix.exists(arg.getClass.getName.startsWith) || { - val o = arg.asInstanceOf[AnyRef] - val serializer = ser.findSerializerFor(o) - val bytes = serializer.toBinary(o) - val ms = Serializers.manifestFor(serializer, o) - ser.deserialize(bytes, serializer.identifier, ms).get != null - }) + props.args.forall(arg => + arg == null || + arg.isInstanceOf[NoSerializationVerificationNeeded] || + settings.NoSerializationVerificationNeededClassPrefix.exists(arg.getClass.getName.startsWith) || { + val o = arg.asInstanceOf[AnyRef] + val serializer = ser.findSerializerFor(o) + val bytes = serializer.toBinary(o) + val ms = Serializers.manifestFor(serializer, o) + ser.deserialize(bytes, serializer.identifier, ms).get != null + }) } catch { case NonFatal(e) => throw new IllegalArgumentException(s"pre-creation serialization check failed at [${cell.self.path}/$name]", e) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala index 7d056a3db2..d615424616 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala @@ -173,7 +173,8 @@ private[akka] object ChildrenContainer { if (t.isEmpty) reason match { case Termination => TerminatedChildrenContainer case _ => NormalChildrenContainer(c - child.path.name) - } else copy(c - child.path.name, t) + } + else copy(c - child.path.name, t) } override def getByName(name: String): Option[ChildStats] = c.get(name) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala index 35e002155d..a7c0b7bfd5 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala @@ -29,7 +29,8 @@ private[akka] trait DeathWatch { this: ActorCell => maintainAddressTerminatedSubscription(a) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ updateWatching(a, None) - } else + } + else checkWatchingSame(a, None) } a @@ -44,7 +45,8 @@ private[akka] trait DeathWatch { this: ActorCell => maintainAddressTerminatedSubscription(a) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ updateWatching(a, Some(msg)) - } else + } + else checkWatchingSame(a, Some(msg)) } a diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala index 11f6a9833a..71e86414e5 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala @@ -36,8 +36,7 @@ final case class SerializationCheckFailedException private[dungeon] (msg: Object @InternalApi private[akka] trait Dispatch { this: ActorCell => - @nowarn @volatile private var _mailboxDoNotCallMeDirectly - : Mailbox = _ //This must be volatile since it isn't protected by the mailbox status + @nowarn @volatile private var _mailboxDoNotCallMeDirectly: Mailbox = _ // This must be volatile since it isn't protected by the mailbox status @nowarn private def _preventPrivateUnusedErasure = { _mailboxDoNotCallMeDirectly @@ -177,11 +176,12 @@ private[akka] trait Dispatch { this: ActorCell => if 
(system.settings.NoSerializationVerificationNeededClassPrefix.exists(msg.getClass.getName.startsWith)) envelope else { - val deserializedMsg = try { - serializeAndDeserializePayload(msg) - } catch { - case NonFatal(e) => throw SerializationCheckFailedException(msg, e) - } + val deserializedMsg = + try { + serializeAndDeserializePayload(msg) + } catch { + case NonFatal(e) => throw SerializationCheckFailedException(msg, e) + } envelope.message match { case dl: DeadLetter => envelope.copy(message = dl.copy(message = deserializedMsg)) case _ => envelope.copy(message = deserializedMsg) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala index eeecb3e13a..48537c863a 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala @@ -93,9 +93,10 @@ private[akka] trait FaultHandling { this: ActorCell => // if the actor fails in preRestart, we can do nothing but log it: it’s best-effort if (!isFailedFatally) failedActor.aroundPreRestart(cause, optionalMessage) } catch handleNonFatalOrInterruptedException { e => - val ex = PreRestartException(self, e, cause, optionalMessage) - publish(Error(ex, self.path.toString, clazz(failedActor), e.getMessage)) - } finally { + val ex = PreRestartException(self, e, cause, optionalMessage) + publish(Error(ex, self.path.toString, clazz(failedActor), e.getMessage)) + } + finally { clearActorFields(failedActor, recreate = true) } } @@ -162,8 +163,8 @@ private[akka] trait FaultHandling { this: ActorCell => finally clearFailed() try create(None) catch handleNonFatalOrInterruptedException { e => - handleInvokeFailure(Nil, e) - } + handleInvokeFailure(Nil, e) + } } protected def terminate(): Unit = { @@ -203,33 +204,34 @@ private[akka] trait FaultHandling { this: ActorCell => @InternalStableApi final def handleInvokeFailure(childrenNotToSuspend: immutable.Iterable[ActorRef], t: Throwable): Unit = { // prevent any further messages to be processed until the actor has been restarted - if (!isFailed) try { - suspendNonRecursive() - // suspend children - val skip: Set[ActorRef] = currentMessage match { - case Envelope(Failed(_, _, _), child) => setFailed(child); Set(child) - case _ => setFailed(self); Set.empty - } - suspendChildren(exceptFor = skip ++ childrenNotToSuspend) - t match { - // tell supervisor - case _: InterruptedException => - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - parent.sendSystemMessage(Failed(self, new ActorInterruptedException(t), uid)) - case _ => - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - parent.sendSystemMessage(Failed(self, t, uid)) - } - } catch handleNonFatalOrInterruptedException { e => - publish( - Error( - e, - self.path.toString, - clazz(actor), - "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t))) - try children.foreach(stop) - finally finishTerminate() - } + if (!isFailed) + try { + suspendNonRecursive() + // suspend children + val skip: Set[ActorRef] = currentMessage match { + case Envelope(Failed(_, _, _), child) => setFailed(child); Set(child) + case _ => setFailed(self); Set.empty + } + suspendChildren(exceptFor = skip ++ childrenNotToSuspend) + t match { + // tell supervisor + case _: InterruptedException => + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + parent.sendSystemMessage(Failed(self, new ActorInterruptedException(t), uid)) + case _ => 
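// [editor's note] The shouting banner above is load-bearing, not decoration: system
// messages form an intrusive linked list (each SystemMessage carries its own `next`
// pointer, see SystemMessageList further down in this patch), so sending the same
// instance to two actors would splice one mailbox's system queue into the other's.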
+ // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + parent.sendSystemMessage(Failed(self, t, uid)) + } + } catch handleNonFatalOrInterruptedException { e => + publish( + Error( + e, + self.path.toString, + clazz(actor), + "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t))) + try children.foreach(stop) + finally finishTerminate() + } } private def finishTerminate(): Unit = { @@ -241,20 +243,26 @@ private[akka] trait FaultHandling { this: ActorCell => */ try if (a ne null) a.aroundPostStop() catch handleNonFatalOrInterruptedException { e => - publish(Error(e, self.path.toString, clazz(a), e.getMessage)) - } finally try stopFunctionRefs() - finally try dispatcher.detach(this) - finally try parent.sendSystemMessage( - DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) - finally try tellWatchersWeDied() - finally try unwatchWatchedActors(a) // stay here as we expect an emergency stop from handleInvokeFailure - finally { - if (system.settings.DebugLifecycle) - publish(Debug(self.path.toString, clazz(a), "stopped")) + publish(Error(e, self.path.toString, clazz(a), e.getMessage)) + } + finally + try stopFunctionRefs() + finally + try dispatcher.detach(this) + finally + try parent.sendSystemMessage( + DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) + finally + try tellWatchersWeDied() + finally + try unwatchWatchedActors(a) // stay here as we expect an emergency stop from handleInvokeFailure + finally { + if (system.settings.DebugLifecycle) + publish(Debug(self.path.toString, clazz(a), "stopped")) - clearActorFields(a, recreate = false) - clearFieldsForTermination() - } + clearActorFields(a, recreate = false) + clearFieldsForTermination() + } } private def finishRecreate(cause: Throwable): Unit = { @@ -272,17 +280,16 @@ private[akka] trait FaultHandling { this: ActorCell => if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(freshActor), "restarted")) // only after parent is up and running again do restart the children which were not stopped - survivors.foreach( - child => - try child.asInstanceOf[InternalActorRef].restart(cause) - catch handleNonFatalOrInterruptedException { e => + survivors.foreach(child => + try child.asInstanceOf[InternalActorRef].restart(cause) + catch handleNonFatalOrInterruptedException { e => publish(Error(e, self.path.toString, clazz(freshActor), "restarting " + child)) }) } catch handleNonFatalOrInterruptedException { e => - setFailedFatally() - clearActorFields(actor, recreate = false) // in order to prevent preRestart() from happening again - handleInvokeFailure(survivors, PostRestartException(self, e, cause)) - } + setFailedFatally() + clearActorFields(actor, recreate = false) // in order to prevent preRestart() from happening again + handleInvokeFailure(survivors, PostRestartException(self, e, cause)) + } } final protected def handleFailure(f: Failed): Unit = { @@ -317,9 +324,9 @@ private[akka] trait FaultHandling { this: ActorCell => if (actor != null) { try actor.supervisorStrategy.handleChildTerminated(this, child, children) catch handleNonFatalOrInterruptedException { e => - publish(Error(e, self.path.toString, clazz(actor), "handleChildTerminated failed")) - handleInvokeFailure(Nil, e) - } + publish(Error(e, self.path.toString, clazz(actor), "handleChildTerminated failed")) + handleInvokeFailure(Nil, e) + } } /* * if the removal changed the state of the (terminating) children container, diff --git 
a/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala index 13a8338f64..996091a2d3 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala @@ -48,7 +48,7 @@ private[akka] trait ReceiveTimeout { this: ActorCell => } private def rescheduleReceiveTimeout(f: FiniteDuration): Unit = { - receiveTimeoutData._2.cancel() //Cancel any ongoing future + receiveTimeoutData._2.cancel() // Cancel any ongoing future val task = system.scheduler.scheduleOnce(f, self, akka.actor.ReceiveTimeout)(this.dispatcher) receiveTimeoutData = (f, task) } diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index ca11084607..671fb580e7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -63,16 +63,17 @@ private[akka] trait LoadMetrics { self: Executor => * INTERNAL API */ private[akka] object MessageDispatcher { - val UNSCHEDULED = 0 //WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher + val UNSCHEDULED = 0 // WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher val SCHEDULED = 1 val RESCHEDULED = 2 // dispatcher debugging helper using println (see below) // since this is a compile-time constant, scalac will elide code behind if (MessageDispatcher.debug) (RK checked with 2.9.1) final val debug = false // Deliberately without type ascription to make it a compile-time constant - lazy val actors = new Index[MessageDispatcher, ActorRef](16, new ju.Comparator[ActorRef] { - override def compare(a: ActorRef, b: ActorRef): Int = a.compareTo(b) - }) + lazy val actors = new Index[MessageDispatcher, ActorRef](16, + new ju.Comparator[ActorRef] { + override def compare(a: ActorRef, b: ActorRef): Int = a.compareTo(b) + }) def printActors(): Unit = if (debug) { for { @@ -194,9 +195,9 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator private def scheduleShutdownAction(): Unit = { // IllegalStateException is thrown if scheduler has been shutdown try prerequisites.scheduler.scheduleOnce(shutdownTimeout, shutdownAction)(new ExecutionContext { - override def execute(runnable: Runnable): Unit = runnable.run() - override def reportFailure(t: Throwable): Unit = MessageDispatcher.this.reportFailure(t) - }) + override def execute(runnable: Runnable): Unit = runnable.run() + override def reportFailure(t: Throwable): Unit = MessageDispatcher.this.reportFailure(t) + }) catch { case _: IllegalStateException => shutdown() @@ -239,7 +240,7 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator shutdownSchedule match { case SCHEDULED => try { - if (inhabitants == 0) shutdown() //Warning, racy + if (inhabitants == 0) shutdown() // Warning, racy } finally { while (!updateShutdownSchedule(shutdownSchedule, UNSCHEDULED)) {} } @@ -371,8 +372,8 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites: .recover { case exception => throw new IllegalArgumentException( - ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], - make sure it has an accessible constructor with a [%s,%s] signature""") + """Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined 
in [%s], + make sure it has an accessible constructor with a [%s,%s] signature""" .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception) } @@ -407,7 +408,7 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr case size if size > 0 => Some(config.getString("task-queue-type")) .map { - case "array" => ThreadPoolConfig.arrayBlockingQueue(size, false) //TODO config fairness? + case "array" => ThreadPoolConfig.arrayBlockingQueue(size, false) // TODO config fairness? case "" | "linked" => ThreadPoolConfig.linkedBlockingQueue(size) case x => throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!".format(x)) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 586519de27..c8bea5f2ca 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -68,7 +68,7 @@ private[akka] class BalancingDispatcher( with DefaultSystemMessageQueue { override def cleanUp(): Unit = { val dlq = mailboxes.deadLetterMailbox - //Don't call the original implementation of this since it scraps all messages, and we don't want to do that + // Don't call the original implementation of this since it scraps all messages, and we don't want to do that var messages = systemDrain(new LatestFirstSystemMessageList(NoMessage)) while (messages.nonEmpty) { // message must be “virgin” before being able to systemEnqueue again @@ -103,12 +103,12 @@ private[akka] class BalancingDispatcher( if (attemptTeamWork) { @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = if (messageQueue.hasMessages - && i.hasNext - && (executorService.executor match { - case lm: LoadMetrics => !lm.atFullThrottle() - case _ => true - }) - && !registerForExecution(i.next.mailbox, false, false)) + && i.hasNext + && (executorService.executor match { + case lm: LoadMetrics => !lm.atFullThrottle() + case _ => true + }) + && !registerForExecution(i.next.mailbox, false, false)) scheduleOne(i) scheduleOne() diff --git a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala index d7e58bb538..4bdf87aeb4 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala @@ -68,7 +68,7 @@ private[akka] trait BatchingExecutor extends Executor { val current = _tasksLocal.get() _tasksLocal.remove() if ((current eq this) && !current.isEmpty) { // Resubmit ourselves if something bad happened and we still have work to do - unbatchedExecute(current) //TODO what if this submission fails? + unbatchedExecute(current) // TODO what if this submission fails? 
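// [editor's note] `current` doubles as the thread-local batching marker and the batch
// of queued tasks itself: if the batch aborted while work remains, it is handed back
// to the underlying executor via unbatchedExecute so no task is dropped, and `true`
// tells the caller that a resubmission took place.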
true } else false } diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index b23304e36b..75b2300f41 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -121,7 +121,7 @@ class Dispatcher( mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = { - if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races + if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { // This needs to be here to ensure thread safety and no races if (mbox.setAsScheduled()) { try { executorService.execute(mbox) @@ -131,7 +131,7 @@ class Dispatcher( try { executorService.execute(mbox) true - } catch { //Retry once + } catch { // Retry once case e: RejectedExecutionException => mbox.setAsIdle() eventStream.publish(Error(e, getClass.getName, getClass, "registerForExecution was rejected twice!")) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index dccb603e67..7c874f705c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -20,7 +20,7 @@ import akka.annotation.InternalApi import akka.annotation.InternalStableApi import akka.compat import akka.dispatch.internal.SameThreadExecutionContext -import akka.japi.{ Procedure, Function => JFunc, Option => JOption } +import akka.japi.{ Function => JFunc, Option => JOption, Procedure } import akka.util.unused /** diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 6bde0d2251..2238b3e51e 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -106,10 +106,10 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) def numberOfMessages: Int = messageQueue.numberOfMessages @volatile - protected var _statusDoNotCallMeDirectly: Status = _ //0 by default + protected var _statusDoNotCallMeDirectly: Status = _ // 0 by default @volatile - protected var _systemQueueDoNotCallMeDirectly: SystemMessage = _ //null by default + protected var _systemQueueDoNotCallMeDirectly: SystemMessage = _ // null by default @inline final def currentStatus: Mailbox.Status = Unsafe.instance.getIntVolatile(this, AbstractMailbox.mailboxStatusOffset) @@ -226,12 +226,12 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) override final def run(): Unit = { try { - if (!isClosed) { //Volatile read, needed here - processAllSystemMessages() //First, deal with any system messages - processMailbox() //Then deal with messages + if (!isClosed) { // Volatile read, needed here + processAllSystemMessages() // First, deal with any system messages + processMailbox() // Then deal with messages } } finally { - setAsIdle() //Volatile write, needed here + setAsIdle() // Volatile write, needed here dispatcher.registerForExecution(this, false, false) } } @@ -998,27 +998,28 @@ object BoundedControlAwareMailbox { var remaining = pushTimeOut.toNanos putLock.lockInterruptibly() - val inserted = try { - var stop = false - while (size.get() == capacity && !stop) { - remaining = notFull.awaitNanos(remaining) - stop = remaining <= 0 + val inserted = + try { + var stop = false + while (size.get() == capacity && !stop) { + remaining 
= notFull.awaitNanos(remaining) + stop = remaining <= 0 + } + + if (stop) { + false + } else { + q.add(envelope) + val c = size.incrementAndGet() + + if (c < capacity) notFull.signal() + + true + } + } finally { + putLock.unlock() } - if (stop) { - false - } else { - q.add(envelope) - val c = size.incrementAndGet() - - if (c < capacity) notFull.signal() - - true - } - } finally { - putLock.unlock() - } - if (!inserted) { receiver .asInstanceOf[InternalActorRef] diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala index e05e6c0368..ddddd0e3af 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala @@ -272,7 +272,7 @@ private[akka] class Mailboxes( private final def warn(msg: String): Unit = eventStream.publish(Warning("mailboxes", getClass, msg)) - //INTERNAL API + // INTERNAL API private def config(id: String): Config = { import akka.util.ccompat.JavaConverters._ ConfigFactory diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index 4239f57a50..b9168571a8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -32,7 +32,7 @@ class PinnedDispatcher( @volatile private var owner: ActorCell = _actor - //Relies on an external lock provided by MessageDispatcher.attach + // Relies on an external lock provided by MessageDispatcher.attach protected[akka] override def register(actorCell: ActorCell) = { val actor = owner if ((actor ne null) && actorCell != actor) @@ -40,7 +40,7 @@ class PinnedDispatcher( owner = actorCell super.register(actorCell) } - //Relies on an external lock provided by MessageDispatcher.detach + // Relies on an external lock provided by MessageDispatcher.detach protected[akka] override def unregister(actor: ActorCell) = { super.unregister(actor) owner = null diff --git a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala index 0595446669..a8dec2d298 100644 --- a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala +++ b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala @@ -170,7 +170,6 @@ private[akka] class AffinityPool( * due to an exception being thrown in user code, the worker is * responsible for adding one more worker to compensate for its * own termination - * */ private def onWorkerExit(w: AffinityPoolWorker, abruptTermination: Boolean): Unit = bookKeepingLock.withGuard { @@ -410,7 +409,7 @@ private[akka] final class FairDistributionHashCache(val config: Config) extends override def toString: String = s"FairDistributionHashCache(fairDistributionThreshold = $fairDistributionThreshold)" private[this] final def improve(h: Int): Int = - 0x7FFFFFFF & (reverseBytes(h * 0x9e3775cd) * 0x9e3775cd) // `sbhash`: In memory of Phil Bagwell. + 0x7FFFFFFF & (reverseBytes(h * 0x9E3775CD) * 0x9E3775CD) // `sbhash`: In memory of Phil Bagwell. 
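// [editor's note] `improve` above is a bit-mixing step: multiplying by a large odd
// constant and reversing the byte order spreads entropy from the high bits into the
// low bits, and the 0x7FFFFFFF mask clears the sign bit so the mixed hash can be
// reduced to a non-negative queue index.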
override final def getQueue(command: Runnable, queues: Int): Int = { val runnableHash = command.hashCode() if (fairDistributionThreshold == 0) diff --git a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala index 299ad1904b..331041fdc8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala +++ b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala @@ -36,7 +36,6 @@ private[akka] object SystemMessageList { } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -48,7 +47,6 @@ private[akka] object SystemMessageList { * * The type of the list also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. - * */ private[akka] class LatestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ @@ -97,7 +95,6 @@ private[akka] class LatestFirstSystemMessageList(val head: SystemMessage) extend } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -109,7 +106,6 @@ private[akka] class LatestFirstSystemMessageList(val head: SystemMessage) extend * * This list type also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. - * */ private[akka] class EarliestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala index 80aa6c5e3d..1d36e70dde 100644 --- a/akka-actor/src/main/scala/akka/event/EventBus.scala +++ b/akka-actor/src/main/scala/akka/event/EventBus.scala @@ -25,7 +25,7 @@ trait EventBus { type Classifier type Subscriber - //#event-bus-api + // #event-bus-api /** * Attempts to register the subscriber to the specified Classifier * @return true if successful and false if not (because it was already @@ -49,7 +49,7 @@ trait EventBus { * Publishes the specified Event to this bus */ def publish(event: Event): Unit - //#event-bus-api + // #event-bus-api } /** @@ -82,9 +82,10 @@ trait PredicateClassifier { this: EventBus => */ trait LookupClassification { this: EventBus => - protected final val subscribers = new Index[Classifier, Subscriber](mapSize(), new Comparator[Subscriber] { - def compare(a: Subscriber, b: Subscriber): Int = compareSubscribers(a, b) - }) + protected final val subscribers = new Index[Classifier, Subscriber](mapSize(), + new Comparator[Subscriber] { + def compare(a: Subscriber, b: Subscriber): Int = compareSubscribers(a, b) + }) /** * This is a size hint for the number of Classifiers you expect to have (use powers of 2) @@ -297,7 +298,7 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier => /** The unsubscriber takes care of unsubscribing actors, which have terminated. 
*/ protected lazy val unsubscriber = - ActorClassificationUnsubscriber.start(system, this.toString(), (this.unsubscribe: ActorRef => Unit)) + ActorClassificationUnsubscriber.start(system, this.toString(), this.unsubscribe: ActorRef => Unit) @tailrec protected final def associate(monitored: ActorRef, monitor: ActorRef): Boolean = { diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 50ba6893fd..c203f284e5 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -144,12 +144,13 @@ trait LoggingBus extends ActorEventBus { try { if (system.settings.DebugUnhandledMessage) subscribe( - system.systemActorOf(Props(new Actor { - def receive = { - case UnhandledMessage(msg, sender, rcp) => - publish(Debug(rcp.path.toString, rcp.getClass, "unhandled message from " + sender + ": " + msg)) - } - }), "UnhandledMessageForwarder"), + system.systemActorOf( + Props(new Actor { + def receive = { + case UnhandledMessage(msg, sender, rcp) => + publish(Debug(rcp.path.toString, rcp.getClass, "unhandled message from " + sender + ": " + msg)) + } + }), "UnhandledMessageForwarder"), classOf[UnhandledMessage]) } catch { case _: InvalidActorNameException => // ignore if it is already running @@ -202,16 +203,17 @@ trait LoggingBus extends ActorEventBus { val actor = system.systemActorOf(Props(clazz).withDispatcher(system.settings.LoggersDispatcher), name) implicit def timeout: Timeout = system.settings.LoggerStartTimeout import akka.pattern.ask - val response = try Await.result(actor ? InitializeLogger(this), timeout.duration) - catch { - case _: TimeoutException => - publish( - Warning( - logName, - this.getClass, - "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) - "[TIMEOUT]" - } + val response = + try Await.result(actor ? 
InitializeLogger(this), timeout.duration) + catch { + case _: TimeoutException => + publish( + Warning( + logName, + this.getClass, + "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) + "[TIMEOUT]" + } if (response != LoggerInitialized) throw new LoggerInitializationException( "Logger " + name + " did not respond with LoggerInitialized, sent instead " + response) @@ -1671,7 +1673,7 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { def clearMDC(): Unit = mdc(emptyMDC) } -/** DO NOT INHERIT: Class is open only for use by akka-slf4j*/ +/** DO NOT INHERIT: Class is open only for use by akka-slf4j */ @DoNotInherit class LogMarker(val name: String, val properties: Map[String, Any]) { diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 9f6b601b57..5230a0c395 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -80,7 +80,7 @@ class LoggingReceive(source: Option[AnyRef], r: Receive, label: Option[String], val src = source.getOrElse(context.asInstanceOf[ActorCell].actor) val (str, clazz) = LogSource.fromAnyRef(src) val message = "received " + (if (handled) "handled" else "unhandled") + " message " + o + " from " + context - .sender() + + .sender() + (label match { case Some(l) => " in state " + l case _ => "" diff --git a/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala b/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala index d7a9dffdd9..706125109b 100644 --- a/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala +++ b/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala @@ -88,9 +88,10 @@ private[akka] object DirectByteBufferPool { { (bb: ByteBuffer) => try if (bb.isDirect) { - val cleaner = cleanerMethod.invoke(bb) - cleanMethod.invoke(cleaner) - } catch { case NonFatal(_) => /* ok, best effort attempt to cleanup failed */ } + val cleaner = cleanerMethod.invoke(bb) + cleanMethod.invoke(cleaner) + } + catch { case NonFatal(_) => /* ok, best effort attempt to cleanup failed */ } } } catch { case NonFatal(_) => _ => () /* reflection failed, use no-op fallback */ } diff --git a/akka-actor/src/main/scala/akka/io/Dns.scala b/akka-actor/src/main/scala/akka/io/Dns.scala index ceb92e2c54..b529122b2a 100644 --- a/akka-actor/src/main/scala/akka/io/Dns.scala +++ b/akka-actor/src/main/scala/akka/io/Dns.scala @@ -95,10 +95,11 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { @deprecated("Use cached(DnsProtocol.Resolve)", "2.6.0") def apply(newProtocol: DnsProtocol.Resolved): Resolved = { - Resolved(newProtocol.name, newProtocol.records.collect { - case r: ARecord => r.ip - case r: AAAARecord => r.ip - }) + Resolved(newProtocol.name, + newProtocol.records.collect { + case r: ARecord => r.ip + case r: AAAARecord => r.ip + }) } } diff --git a/akka-actor/src/main/scala/akka/io/DnsProvider.scala b/akka-actor/src/main/scala/akka/io/DnsProvider.scala index 9a735a63d0..d2f23aee47 100644 --- a/akka-actor/src/main/scala/akka/io/DnsProvider.scala +++ b/akka-actor/src/main/scala/akka/io/DnsProvider.scala @@ -12,7 +12,6 @@ import akka.actor.Actor * It is expected that this will be deprecated/removed in future Akka versions * * TODO make private and remove deprecated in 2.7.0 - * */ @deprecated("Overriding the DNS implementation will be removed in future versions of Akka", "2.6.0") trait DnsProvider { diff --git 
a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala index 380328b75d..98187f8444 100644 --- a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala +++ b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala @@ -132,8 +132,9 @@ private[io] object SelectionHandler { decision: SupervisorStrategy.Directive): Unit = if (cause.isInstanceOf[DeathPactException]) { try context.system.eventStream.publish { - Logging.Debug(child.path.toString, getClass, "Closed after handler termination") - } catch { case NonFatal(_) => } + Logging.Debug(child.path.toString, getClass, "Closed after handler termination") + } + catch { case NonFatal(_) => } } else super.logFailure(context, child, cause, decision) } diff --git a/akka-actor/src/main/scala/akka/io/Tcp.scala b/akka-actor/src/main/scala/akka/io/Tcp.scala index 87c0baeffc..f311f7c120 100644 --- a/akka-actor/src/main/scala/akka/io/Tcp.scala +++ b/akka-actor/src/main/scala/akka/io/Tcp.scala @@ -625,11 +625,11 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension { val FinishConnectRetries: Int = getInt("finish-connect-retries").requiring(_ > 0, "finish-connect-retries must be > 0") - val WindowsConnectionAbortWorkaroundEnabled - : Boolean = getString("windows-connection-abort-workaround-enabled") match { - case "auto" => Helpers.isWindows - case _ => getBoolean("windows-connection-abort-workaround-enabled") - } + val WindowsConnectionAbortWorkaroundEnabled: Boolean = + getString("windows-connection-abort-workaround-enabled") match { + case "auto" => Helpers.isWindows + case _ => getBoolean("windows-connection-abort-workaround-enabled") + } private[this] def getIntBytes(path: String): Int = { val size = getBytes(path) @@ -640,7 +640,6 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension { } /** - * */ val manager: ActorRef = { system.systemActorOf( diff --git a/akka-actor/src/main/scala/akka/io/TcpConnection.scala b/akka-actor/src/main/scala/akka/io/TcpConnection.scala index 986b61e1ba..1fa7fe8a4e 100644 --- a/akka-actor/src/main/scala/akka/io/TcpConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpConnection.scala @@ -79,7 +79,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha // if we are in push mode or already have resumed reading in pullMode while waiting for Register // then register OP_READ interest - if (!pullMode || (/*pullMode && */ !readingSuspended)) resumeReading(info, None) + if (!pullMode || ( /*pullMode && */ !readingSuspended)) resumeReading(info, None) case ResumeReading => readingSuspended = false @@ -286,16 +286,17 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha val buffer = bufferPool.acquire() try innerRead(buffer, ReceivedMessageSizeLimit) match { - case AllRead => // nothing to do - case MoreDataWaiting => - if (!pullMode) self ! ChannelReadable - case EndOfStream if channel.socket.isOutputShutdown => - if (TraceLogging) log.debug("Read returned end-of-stream, our side already closed") - doCloseConnection(info.handler, closeCommander, ConfirmedClosed) - case EndOfStream => - if (TraceLogging) log.debug("Read returned end-of-stream, our side not yet closed") - handleClose(info, closeCommander, PeerClosed) - } catch { + case AllRead => // nothing to do + case MoreDataWaiting => + if (!pullMode) self ! 
ChannelReadable + case EndOfStream if channel.socket.isOutputShutdown => + if (TraceLogging) log.debug("Read returned end-of-stream, our side already closed") + doCloseConnection(info.handler, closeCommander, ConfirmedClosed) + case EndOfStream => + if (TraceLogging) log.debug("Read returned end-of-stream, our side not yet closed") + handleClose(info, closeCommander, PeerClosed) + } + catch { case e: IOException => handleError(info.handler, e) } finally bufferPool.release(buffer) } @@ -529,7 +530,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha self ! UpdatePendingWriteAndThen(updated, TcpConnection.doNothing) } else { release() - val andThen = if (!ack.isInstanceOf[NoAck])() => commander ! ack else doNothing + val andThen = if (!ack.isInstanceOf[NoAck]) () => commander ! ack else doNothing self ! UpdatePendingWriteAndThen(PendingWrite(commander, tail), andThen) } } catch { diff --git a/akka-actor/src/main/scala/akka/io/TcpManager.scala b/akka-actor/src/main/scala/akka/io/TcpManager.scala index f94401ed9e..af5f842180 100644 --- a/akka-actor/src/main/scala/akka/io/TcpManager.scala +++ b/akka-actor/src/main/scala/akka/io/TcpManager.scala @@ -43,7 +43,6 @@ import akka.actor.{ ActorLogging, Props } * If the connect request is rejected because the Tcp system is not able to register more channels (see the nr-of-selectors * and max-channels configuration options in the akka.io.tcp section of the configuration) the sender will be notified * with a [[akka.io.Tcp.CommandFailed]] message. This message contains the original command for reference. - * */ private[io] class TcpManager(tcp: TcpExt) extends SelectionHandler.SelectorBasedManager(tcp.Settings, tcp.Settings.NrOfSelectors) @@ -52,11 +51,11 @@ private[io] class TcpManager(tcp: TcpExt) def receive = workerForCommandHandler { case c: Connect => val commander = sender() // cache because we create a function that will run asynchronously - (registry => Props(classOf[TcpOutgoingConnection], tcp, registry, commander, c)) + registry => Props(classOf[TcpOutgoingConnection], tcp, registry, commander, c) case b: Bind => val commander = sender() // cache because we create a function that will run asynchronously - (registry => Props(classOf[TcpListener], selectorPool, tcp, registry, commander, b)) + registry => Props(classOf[TcpListener], selectorPool, tcp, registry, commander, b) } } diff --git a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala index 3f1e709f1b..98c2e3a33c 100644 --- a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala @@ -43,7 +43,7 @@ private[io] class TcpOutgoingConnection( options.foreach(_.beforeConnect(channel.socket)) localAddress.foreach(channel.socket.bind) channelRegistry.register(channel, 0) - timeout.foreach(context.setReceiveTimeout) //Initiate connection timeout if supplied + timeout.foreach(context.setReceiveTimeout) // Initiate connection timeout if supplied private def stop(cause: Throwable): Unit = stopWith(CloseInformation(Set(commander), CommandFailed(connect).withCause(cause)), shouldAbort = true) diff --git a/akka-actor/src/main/scala/akka/io/UdpManager.scala b/akka-actor/src/main/scala/akka/io/UdpManager.scala index 462f7621e5..8054b6ad55 100644 --- a/akka-actor/src/main/scala/akka/io/UdpManager.scala +++ b/akka-actor/src/main/scala/akka/io/UdpManager.scala @@ -42,7 +42,6 @@ import akka.io.Udp._ * message that the service is available.
UDP datagrams can be sent by sending [[akka.io.Udp.Send]] messages to the * sender of SimpleSenderReady. All the datagrams will contain an ephemeral local port as sender and answers will be * discarded. - * */ private[io] class UdpManager(udp: UdpExt) extends SelectionHandler.SelectorBasedManager(udp.settings, udp.settings.NrOfSelectors) { @@ -50,11 +49,11 @@ private[io] class UdpManager(udp: UdpExt) def receive = workerForCommandHandler { case b: Bind => val commander = sender() // cache because we create a function that will run asynchronously - (registry => Props(classOf[UdpListener], udp, registry, commander, b)) + registry => Props(classOf[UdpListener], udp, registry, commander, b) case s: SimpleSender => val commander = sender() // cache because we create a function that will run asynchronously - (registry => Props(classOf[UdpSender], udp, registry, commander, s.options)) + registry => Props(classOf[UdpSender], udp, registry, commander, s.options) } } diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala b/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala index 551e959e67..ef22137a93 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala @@ -24,7 +24,6 @@ import akka.util.ccompat.JavaConverters._ * * Allows for more detailed lookups, by specifying which records should be checked, * and responses can contain more information than plain IP addresses (e.g. ports for SRV records). - * */ object DnsProtocol { @@ -88,13 +87,12 @@ object DnsProtocol { * Java API * * Records that relate to the query but are not strictly answers e.g. A records for the records returned for an SRV query. - * */ def getAdditionalRecords(): util.List[ResourceRecord] = additionalRecords.asJava private val _address: Option[InetAddress] = { val ipv4: Option[Inet4Address] = records.collectFirst { case ARecord(_, _, ip: Inet4Address) => ip } - val ipv6: Option[Inet6Address] = records.collectFirst { case AAAARecord(_, _, ip) => ip } + val ipv6: Option[Inet6Address] = records.collectFirst { case AAAARecord(_, _, ip) => ip } IpVersionSelector.getInetAddress(ipv4, ipv6) } diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala index 552029265a..fcfabbbdb4 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala @@ -150,7 +150,7 @@ private[dns] object ResourceRecord { // If the number of cases increases, remember to add a `@switch` annotation e.g.: // val ttl = (it.getInt: @switch) match { // According to https://www.ietf.org/rfc/rfc1035.txt: "TTL: positive values of a signed 32 bit number."
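The hunk below only drops the redundant parentheses around `it.getInt`, but the decoding rule is worth making concrete: RFC 1035 defines TTL as the positive range of a signed 32-bit integer, and this code treats zero as "never cache". A self-contained sketch with a simplified stand-in for the real `akka.io.dns.CachePolicy.Ttl` type:

```scala
import scala.concurrent.duration._

// Simplified stand-in for akka.io.dns.CachePolicy.Ttl, for illustration only.
final case class Ttl(value: FiniteDuration)
object Ttl {
  val never: Ttl = Ttl(Duration.Zero)
  def fromPositive(d: FiniteDuration): Ttl = {
    require(d > Duration.Zero, s"TTL must be positive, was $d")
    Ttl(d)
  }
}

// Mirrors the match in ResourceRecord.parse: zero means "do not cache"; any
// other value is a time-to-live in seconds. A negative wire value would fail
// the require, matching the RFC's "positive values" wording.
def decodeTtl(raw: Int): Ttl = raw match {
  case 0       => Ttl.never
  case nonZero => Ttl.fromPositive(nonZero.seconds)
}
```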
- val ttl = (it.getInt) match { + val ttl = it.getInt match { case 0 => Ttl.never case nonZero => Ttl.fromPositive(nonZero.seconds) } diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala index 6fa99d734b..046e9fd213 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala @@ -71,9 +71,10 @@ private[io] final class AsyncDnsManager( private val resolver = { val props: Props = FromConfig.props( - Props(provider.actorClass, settings, cache, (factory: ActorRefFactory, dns: List[InetSocketAddress]) => { - dns.map(ns => factory.actorOf(Props(new DnsClient(ns)))) - }).withDeploy(Deploy.local).withDispatcher(dispatcher)) + Props(provider.actorClass, settings, cache, + (factory: ActorRefFactory, dns: List[InetSocketAddress]) => { + dns.map(ns => factory.actorOf(Props(new DnsClient(ns)))) + }).withDeploy(Deploy.local).withDispatcher(dispatcher)) context.actorOf(props, name) } diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala index c0a3f28a5a..e59f607547 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala @@ -57,7 +57,7 @@ private[internal] case class MessageFlags(flags: Short) extends AnyVal { def isRecursionAvailable: Boolean = (flags & (1 << 7)) != 0 def responseCode: ResponseCode.Value = { - ResponseCode(flags & 0x0f) + ResponseCode(flags & 0x0F) } override def toString: String = { diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala index d0fb3f0902..e9d86b119f 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala @@ -38,8 +38,8 @@ private[akka] object DomainName { if (ret.nonEmpty) ret.append('.') - if ((length & 0xc0) == 0xc0) { - val offset = ((length.toShort & 0x3f) << 8) | (it.getByte.toShort & 0x00ff) + if ((length & 0xC0) == 0xC0) { + val offset = ((length.toShort & 0x3F) << 8) | (it.getByte.toShort & 0x00FF) return ret.result() + parse(msg.iterator.drop(offset), msg) } diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 8d863a3f10..9b0222f2f4 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -81,7 +81,6 @@ trait AskSupport { * EnrichedMessage(response) * } pipeTo nextActor * }}} - * */ def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef.internalAsk(message, timeout, ActorRef.noSender) @@ -148,7 +147,6 @@ trait AskSupport { * EnrichedMessage(response) * } pipeTo nextActor * }}} - * */ def ask(actorSelection: ActorSelection, message: Any)(implicit timeout: Timeout): Future[Any] = actorSelection.internalAsk(message, timeout, ActorRef.noSender) @@ -270,7 +268,6 @@ trait ExplicitAskSupport { * EnrichedMessage(response) * } pipeTo nextActor * }}} - * */ def ask(actorSelection: ActorSelection, messageFactory: ActorRef => Any)(implicit timeout: Timeout): Future[Any] = actorSelection.internalAsk(messageFactory, timeout, ActorRef.noSender) @@ -335,7 +332,7 @@ final class AskableActorRef(val actorRef: ActorRef) extends AnyVal { protected 
def ask(message: Any, timeout: Timeout): Future[Any] = internalAsk(message, timeout, ActorRef.noSender) - //todo add scaladoc + // todo add scaladoc def ask(message: Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAsk(message, timeout, sender) diff --git a/akka-actor/src/main/scala/akka/pattern/Backoff.scala b/akka-actor/src/main/scala/akka/pattern/Backoff.scala index d2325f7ca9..64b4a9b266 100644 --- a/akka-actor/src/main/scala/akka/pattern/Backoff.scala +++ b/akka-actor/src/main/scala/akka/pattern/Backoff.scala @@ -68,7 +68,6 @@ object Backoff { * @param maxNrOfRetries maximum number of attempts to restart the child actor. * The supervisor will terminate itself after the maxNrOfRetries is reached. * In order to restart infinitely pass in `-1`. - * */ @deprecated("Use BackoffOpts.onFailure instead", "2.5.22") def onFailure( @@ -606,7 +605,7 @@ private final case class BackoffOptionsImpl( } backoffType match { - //onFailure method in companion object + // onFailure method in companion object case RestartImpliesFailure => Props( new BackoffOnRestartSupervisor( @@ -618,7 +617,7 @@ private final case class BackoffOptionsImpl( randomFactor, supervisorStrategy, replyWhileStopped.map(msg => ReplyWith(msg)).getOrElse(ForwardDeathLetters))) - //onStop method in companion object + // onStop method in companion object case StopImpliesFailure => Props( new BackoffOnStopSupervisor( diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala index 0283a77726..facad6115c 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala @@ -323,7 +323,7 @@ object BackoffSupervisor { } } -final class BackoffSupervisor @deprecated("Use `BackoffSupervisor.props` method instead", since = "2.5.22")( +final class BackoffSupervisor @deprecated("Use `BackoffSupervisor.props` method instead", since = "2.5.22") ( override val childProps: Props, override val childName: String, minBackoff: FiniteDuration, diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index 95e4bd11d7..ba35194a44 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -353,7 +353,6 @@ class CircuitBreaker( * @param body Call needing protection * @return [[scala.concurrent.Future]] containing the call result or a * `scala.concurrent.TimeoutException` if the call timed out - * */ def withCircuitBreaker[T](body: => Future[T]): Future[T] = currentState.invoke(body, failureFn) @@ -408,8 +407,8 @@ class CircuitBreaker( body: Callable[CompletionStage[T]], defineFailureFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): CompletionStage[T] = FutureConverters.toJava[T](callWithCircuitBreaker(new Callable[Future[T]] { - override def call(): Future[T] = FutureConverters.toScala(body.call()) - }, defineFailureFn)) + override def call(): Future[T] = FutureConverters.toScala(body.call()) + }, defineFailureFn)) /** * Wraps invocations of synchronous calls that need to be protected. @@ -925,20 +924,17 @@ class CircuitBreaker( /** * Invoked when call succeeds - * */ def callSucceeds(): Unit /** * Invoked when call fails - * */ def callFails(): Unit /** * Invoked on the transitioned-to state during transition. Notifies listeners after invoking subclass template * method _enter - * */ final def enter(): Unit = { _enter() @@ -947,7 +943,6 @@ class CircuitBreaker( /** * Template method for concrete traits - * */ def _enter(): Unit } diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 0703bf483c..6b0d838c89 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -53,8 +53,8 @@ trait GracefulStopSupport { internalTarget.sendSystemMessage(Watch(internalTarget, ref)) target.tell(stopMessage, Actor.noSender) ref.result.future.transform({ - case Terminated(t) if t.path == target.path => true - case _ => { internalTarget.sendSystemMessage(Unwatch(target, ref)); false } - }, t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ExecutionContexts.parasitic) + case Terminated(t) if t.path == target.path => true + case _ => { internalTarget.sendSystemMessage(Unwatch(target, ref)); false } + }, t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ExecutionContexts.parasitic) } } diff --git a/akka-actor/src/main/scala/akka/routing/Balancing.scala b/akka-actor/src/main/scala/akka/routing/Balancing.scala index d5f0333286..aab33336e8 100644 --- a/akka-actor/src/main/scala/akka/routing/Balancing.scala +++ b/akka-actor/src/main/scala/akka/routing/Balancing.scala @@ -144,7 +144,7 @@ final case class BalancingPool( other match { case p: Pool => if ((this.supervisorStrategy eq Pool.defaultSupervisorStrategy) - && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) + && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) this.withSupervisorStrategy(p.supervisorStrategy) else this diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index 844de44174..aee861c101 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -17,7 +17,6 @@ import scala.reflect.ClassTag * * Note that the toString of the ring nodes is used for the node * hash, i.e. make sure it is different for different nodes.
- * */ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], val virtualNodesFactor: Int) { @@ -41,8 +40,8 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v def :+(node: T): ConsistentHash[T] = { val nodeHash = hashFor(node.toString) new ConsistentHash(nodes ++ ((1 to virtualNodesFactor).map { r => - (concatenateNodeHash(nodeHash, r) -> node) - }), virtualNodesFactor) + concatenateNodeHash(nodeHash, r) -> node + }), virtualNodesFactor) } /** @@ -60,8 +59,8 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v def :-(node: T): ConsistentHash[T] = { val nodeHash = hashFor(node.toString) new ConsistentHash(nodes -- ((1 to virtualNodesFactor).map { r => - concatenateNodeHash(nodeHash, r) - }), virtualNodesFactor) + concatenateNodeHash(nodeHash, r) + }), virtualNodesFactor) } /** @@ -119,7 +118,7 @@ object ConsistentHash { node <- nodes nodeHash = hashFor(node.toString) vnode <- 1 to virtualNodesFactor - } yield (concatenateNodeHash(nodeHash, vnode) -> node)), + } yield concatenateNodeHash(nodeHash, vnode) -> node), virtualNodesFactor) } diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala index c1b443bfff..6dc89b2ac3 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala @@ -143,7 +143,6 @@ object ConsistentHashingRoutingLogic { * use for the consistent hash key * * @param system the actor system hosting this router - * */ @SerialVersionUID(1L) final case class ConsistentHashingRoutingLogic( @@ -347,7 +346,7 @@ final case class ConsistentHashingPool( */ override def withFallback(other: RouterConfig): RouterConfig = other match { case _: FromConfig | _: NoRouter => this.overrideUnsetConfig(other) - case otherRouter: ConsistentHashingPool => (copy(hashMapping = otherRouter.hashMapping)).overrideUnsetConfig(other) + case otherRouter: ConsistentHashingPool => copy(hashMapping = otherRouter.hashMapping).overrideUnsetConfig(other) case _ => throw new IllegalArgumentException("Expected ConsistentHashingPool, got [%s]".format(other)) } diff --git a/akka-actor/src/main/scala/akka/routing/MurmurHash.scala b/akka-actor/src/main/scala/akka/routing/MurmurHash.scala index 919f9c1e55..a4e70268f3 100644 --- a/akka-actor/src/main/scala/akka/routing/MurmurHash.scala +++ b/akka-actor/src/main/scala/akka/routing/MurmurHash.scala @@ -38,18 +38,18 @@ import akka.util.ccompat._ object MurmurHash { // Magic values used for MurmurHash's 32 bit hash. // Don't change these without consulting a hashing expert! 
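Stepping back from the formatting churn for a moment: the ConsistentHash hunks above all revolve around the virtual-node trick, where each node is inserted into the ring `virtualNodesFactor` times under derived hashes so that adding or removing a node only remaps a small slice of the key space. A minimal sketch of that insertion, using illustrative stand-ins for the private `hashFor`/`concatenateNodeHash` helpers:

```scala
import scala.collection.immutable.SortedMap

// Stand-ins for ConsistentHash's private helpers; the real ones use MurmurHash.
def hashFor(s: String): Int = s.hashCode
def concatenateNodeHash(nodeHash: Int, vnode: Int): Int = nodeHash * 31 + vnode

// Mirrors ConsistentHash.:+ above: one ring entry per virtual node.
def addNode[T](ring: SortedMap[Int, T], node: T, virtualNodesFactor: Int): SortedMap[Int, T] = {
  val nodeHash = hashFor(node.toString)
  ring ++ (1 to virtualNodesFactor).map(r => concatenateNodeHash(nodeHash, r) -> node)
}
```

Lookup then walks the ring clockwise: take the first entry at or after the key's hash, wrapping around to the first entry overall.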
- final private val visibleMagic: Int = 0x971e137b + final private val visibleMagic: Int = 0x971E137B final private val hiddenMagicA: Int = 0x95543787 - final private val hiddenMagicB: Int = 0x2ad7eb25 - final private val visibleMixer: Int = 0x52dce729 - final private val hiddenMixerA: Int = 0x7b7d159c - final private val hiddenMixerB: Int = 0x6bce6396 - final private val finalMixer1: Int = 0x85ebca6b - final private val finalMixer2: Int = 0xc2b2ae35 + final private val hiddenMagicB: Int = 0x2AD7EB25 + final private val visibleMixer: Int = 0x52DCE729 + final private val hiddenMixerA: Int = 0x7B7D159C + final private val hiddenMixerB: Int = 0x6BCE6396 + final private val finalMixer1: Int = 0x85EBCA6B + final private val finalMixer2: Int = 0xC2B2AE35 // Arbitrary values used for hashing certain classes - final private val seedString: Int = 0xf7ca7fd2 - final private val seedArray: Int = 0x3c074a61 + final private val seedString: Int = 0xF7CA7FD2 + final private val seedArray: Int = 0x3C074A61 /** The first 23 magic integers from the first stream are stored here */ private val storedMagicA: Array[Int] = @@ -88,7 +88,7 @@ object MurmurHash { /** Once all hashes have been incorporated, this performs a final mixing */ def finalizeHash(hash: Int): Int = { - var i = (hash ^ (hash >>> 16)) + var i = hash ^ (hash >>> 16) i *= finalMixer1 i ^= (i >>> 13) i *= finalMixer2 diff --git a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala index 1d71b7fdfe..9579e6f098 100644 --- a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala +++ b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala @@ -114,7 +114,6 @@ case object OptimalSizeExploringResizer { * * For documentation about the parameters, see the reference.conf - * akka.actor.deployment.default.optimal-size-exploring-resizer - * */ @SerialVersionUID(1L) case class DefaultOptimalSizeExploringResizer( @@ -237,7 +236,7 @@ case class DefaultOptimalSizeExploringResizer( if (totalProcessed > 0) { val duration = Duration.fromNanos(System.nanoTime() - record.checkTime) val last: Duration = duration / totalProcessed - //exponentially decrease the weight of old last metrics data + // exponentially decrease the weight of old last metrics data val toUpdate = performanceLog.get(currentSize).fold(last) { oldSpeed => (oldSpeed * (1.0 - weightOfLatestMetric)) + (last * weightOfLatestMetric) } @@ -259,7 +258,8 @@ case class DefaultOptimalSizeExploringResizer( val currentSize = currentRoutees.length val now = LocalDateTime.now val proposedChange = - if (record.underutilizationStreak.fold(false)(_.start.isBefore(now.minus(downsizeAfterUnderutilizedFor.asJava)))) { + if (record.underutilizationStreak.fold(false)( + _.start.isBefore(now.minus(downsizeAfterUnderutilizedFor.asJava)))) { val downsizeTo = (record.underutilizationStreak.get.highestUtilization * downsizeRatio).toInt Math.min(downsizeTo - currentSize, 0) } else if (performanceLog.isEmpty || record.underutilizationStreak.isDefined) { diff --git a/akka-actor/src/main/scala/akka/routing/Resizer.scala b/akka-actor/src/main/scala/akka/routing/Resizer.scala index 70dbb9b82b..c5ce29a8b8 100644 --- a/akka-actor/src/main/scala/akka/routing/Resizer.scala +++ b/akka-actor/src/main/scala/akka/routing/Resizer.scala @@ -153,7 +153,7 @@ case class DefaultResizer( if (messagesPerResize <= 0) throw new IllegalArgumentException("messagesPerResize must be > 0, was 
[%s]".format(messagesPerResize)) - def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0) + def isTimeForResize(messageCounter: Long): Boolean = messageCounter % messagesPerResize == 0 override def resize(currentRoutees: immutable.IndexedSeq[Routee]): Int = capacity(currentRoutees) @@ -275,7 +275,7 @@ private[akka] final class ResizablePoolCell( override def sendMessage(envelope: Envelope): Unit = { if (!routerConfig.isManagementMessage(envelope.message) && - resizer.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) { + resizer.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) { super.sendMessage(Envelope(ResizablePoolActor.Resize, self, system)) } @@ -283,18 +283,19 @@ private[akka] final class ResizablePoolCell( } private[akka] def resize(initial: Boolean): Unit = { - if (resizeInProgress.get || initial) try { - tryReportMessageCount() - val requestedCapacity = resizer.resize(router.routees) - if (requestedCapacity > 0) { - val newRoutees = Vector.fill(requestedCapacity)(pool.newRoutee(routeeProps, this)) - addRoutees(newRoutees) - } else if (requestedCapacity < 0) { - val currentRoutees = router.routees - val abandon = currentRoutees.drop(currentRoutees.length + requestedCapacity) - removeRoutees(abandon, stopChild = true) - } - } finally resizeInProgress.set(false) + if (resizeInProgress.get || initial) + try { + tryReportMessageCount() + val requestedCapacity = resizer.resize(router.routees) + if (requestedCapacity > 0) { + val newRoutees = Vector.fill(requestedCapacity)(pool.newRoutee(routeeProps, this)) + addRoutees(newRoutees) + } else if (requestedCapacity < 0) { + val currentRoutees = router.routees + val abandon = currentRoutees.drop(currentRoutees.length + requestedCapacity) + removeRoutees(abandon, stopChild = true) + } + } finally resizeInProgress.set(false) } /** @@ -303,7 +304,7 @@ private[akka] final class ResizablePoolCell( private def tryReportMessageCount(): Unit = { resizer match { case r: OptimalSizeExploringResizer => r.reportMessageCount(router.routees, resizeCounter.get()) - case _ => //ignore + case _ => // ignore } } diff --git a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala index 9a33ce85f3..10403d07ff 100644 --- a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala +++ b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala @@ -113,7 +113,7 @@ private[akka] trait PoolOverrideUnsetConfig[T <: Pool] extends Pool { case p: Pool => val wssConf: PoolOverrideUnsetConfig[T] = if ((this.supervisorStrategy eq Pool.defaultSupervisorStrategy) - && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) + && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) this.withSupervisorStrategy(p.supervisorStrategy).asInstanceOf[PoolOverrideUnsetConfig[T]] else this @@ -426,7 +426,6 @@ final case class AddRoutee(routee: Routee) extends RouterManagementMesssage * For a pool, with child routees, the routee is stopped by sending a [[akka.actor.PoisonPill]] * to the routee. Precautions are taken reduce the risk of dropping messages that are concurrently * being routed to the removed routee, but there are no guarantees. 
- * */ @SerialVersionUID(1L) final case class RemoveRoutee(routee: Routee) extends RouterManagementMesssage diff --git a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala index 54bfea0e5a..0ce5ab9b79 100644 --- a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala +++ b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala @@ -67,12 +67,12 @@ class SmallestMailboxRoutingLogic extends RoutingLogic { val target = targets(at) val newScore: Long = if (isSuspended(target)) Long.MaxValue - 1 - else { //Just about better than the DeadLetters + else { // Just about better than the DeadLetters (if (isProcessingMessage(target)) 1L else 0L) + (if (!hasMessages(target)) 0L - else { //Race between hasMessages and numberOfMessages here, unfortunate the numberOfMessages returns 0 if unknown + else { // Race between hasMessages and numberOfMessages here, unfortunately numberOfMessages returns 0 if unknown val noOfMsgs: Long = if (deep) numberOfMessages(target) else 0 - if (noOfMsgs > 0) noOfMsgs else Long.MaxValue - 3 //Just better than a suspended actorref + if (noOfMsgs > 0) noOfMsgs else Long.MaxValue - 3 // Just better than a suspended actorref }) } diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index d1bbe9af33..65e3ae8c67 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -48,7 +48,7 @@ object Serialization { private final def configToMap(cfg: Config): Map[String, String] = { import akka.util.ccompat.JavaConverters._ - cfg.root.unwrapped.asScala.toMap.map { case (k, v) => (k -> v.toString) } + cfg.root.unwrapped.asScala.toMap.map { case (k, v) => k -> v.toString } } } @@ -176,14 +176,15 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { @deprecated("Use deserialize that accepts the `manifest` as a class name.", since = "2.6.0") def deserialize[T](bytes: Array[Byte], serializerId: Int, clazz: Option[Class[_ <: T]]): Try[T] = Try { - val serializer = try getSerializerById(serializerId) - catch { - case _: NoSuchElementException => - throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId]${clazz.map(c => " (class [" + c.getName + "])").getOrElse("")}. " + - "The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in sync between the two systems.") - } + val serializer = + try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId]${clazz.map(c => " (class [" + c.getName + "])").getOrElse("")}. " + + "The most probable reason is that the configuration entry " + + "akka.actor.serializers is not in sync between the two systems.") + } withTransportInformation { () => serializer.fromBinary(bytes, clazz).asInstanceOf[T] } @@ -196,13 +197,14 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { */ def deserialize(bytes: Array[Byte], serializerId: Int, manifest: String): Try[AnyRef] = Try { - val serializer = try getSerializerById(serializerId) - catch { - case _: NoSuchElementException => - throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in sync between the two systems.") - } + val serializer = + try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + + "akka.actor.serializers is not in sync between the two systems.") + } deserializeByteArray(bytes, serializer, manifest) } @@ -246,13 +248,14 @@ */ @throws(classOf[NotSerializableException]) def deserializeByteBuffer(buf: ByteBuffer, serializerId: Int, manifest: String): AnyRef = { - val serializer = try getSerializerById(serializerId) - catch { - case _: NoSuchElementException => - throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in synch between the two systems.") - } + val serializer = + try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + + "akka.actor.serializers is not in sync between the two systems.") + } // not using `withTransportInformation { () =>` because deserializeByteBuffer is supposed to be the // possibility for allocation free serialization @@ -483,14 +486,14 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * obeying any order between unrelated subtypes (insert sort). */ private def sort(in: Iterable[ClassSerializer]): immutable.Seq[ClassSerializer] = - (in + in .foldLeft(new ArrayBuffer[ClassSerializer](in.size)) { (buf, ca) => buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { case -1 => buf.append(ca) case x => buf.insert(x, ca) } buf - }) + } .to(immutable.Seq) /** @@ -559,7 +562,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { @InternalApi private[akka] def shouldWarnAboutJavaSerializer(serializedClass: Class[_], serializer: Serializer) = { def suppressWarningOnNonSerializationVerification(serializedClass: Class[_]) = { - //suppressed, only when warn-on-no-serialization-verification = off, and extending NoSerializationVerificationNeeded + // suppressed, only when warn-on-no-serialization-verification = off, and extending NoSerializationVerificationNeeded !isWarningOnNoVerificationEnabled && classOf[NoSerializationVerificationNeeded].isAssignableFrom(serializedClass) } diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index 50960659df..cf77c5d530 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -177,7 +177,6 @@ abstract class SerializerWithStringManifest extends Serializer { * // you need to know the maximum size in bytes of the serialized messages * val pool = new akka.io.DirectByteBufferPool(defaultBufferSize = 1024 * 1024, maxPoolEntries = 10) * - * * // Implement this method for compatibility with `SerializerWithStringManifest`.
* override def toBinary(o: AnyRef): Array[Byte] = { * val buf = pool.acquire() diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index 076c1af042..dff4ee99b1 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -38,7 +38,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin protected def createNotEmptyCondition(): Condition = lock.newCondition() protected def createNotFullCondition(): Condition = lock.newCondition() - def put(e: E): Unit = { //Blocks until not full + def put(e: E): Unit = { // Blocks until not full if (e eq null) throw new NullPointerException lock.lockInterruptibly() @@ -56,7 +56,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def take(): E = { //Blocks until not empty + def take(): E = { // Blocks until not empty lock.lockInterruptibly() try { @tailrec def takeElement(): E = { @@ -74,26 +74,26 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def offer(e: E): Boolean = { //Tries to do it immediately, if fail return false + def offer(e: E): Boolean = { // Tries to do it immediately, if fail return false if (e eq null) throw new NullPointerException lock.lock() try { if (backing.size() == maxCapacity) false else { - require(backing.offer(e)) //Should never fail + require(backing.offer(e)) // Should never fail notEmpty.signal() true } } finally lock.unlock() } - def offer(e: E, timeout: Long, unit: TimeUnit): Boolean = { //Tries to do it within the timeout, return false if fail + def offer(e: E, timeout: Long, unit: TimeUnit): Boolean = { // Tries to do it within the timeout, return false if fail if (e eq null) throw new NullPointerException lock.lockInterruptibly() try { @tailrec def offerElement(remainingNanos: Long): Boolean = { if (backing.size() < maxCapacity) { - require(backing.offer(e)) //Should never fail + require(backing.offer(e)) // Should never fail notEmpty.signal() true } else if (remainingNanos <= 0) false @@ -103,7 +103,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def poll(timeout: Long, unit: TimeUnit): E = { //Tries to do it within the timeout, returns null if fail + def poll(timeout: Long, unit: TimeUnit): E = { // Tries to do it within the timeout, returns null if fail lock.lockInterruptibly() try { @tailrec def pollElement(remainingNanos: Long): E = { @@ -120,7 +120,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def poll(): E = { //Tries to remove the head of the queue immediately, if fail, return null + def poll(): E = { // Tries to remove the head of the queue immediately, if fail, return null lock.lock() try { backing.poll() match { @@ -132,7 +132,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - override def remove(e: AnyRef): Boolean = { //Tries to do it immediately, if fail, return false + override def remove(e: AnyRef): Boolean = { // Tries to do it immediately, if fail, return false if (e eq null) throw new NullPointerException lock.lock() try { @@ -252,7 +252,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin override def remove(): Unit = { if (last < 0) throw new 
IllegalStateException val target = elements(last) - last = -1 //To avoid 2 subsequent removes without a next in between + last = -1 // To avoid 2 subsequent removes without a next in between lock.lock() try { @tailrec def removeTarget(i: Iterator[E] = backing.iterator()): Unit = diff --git a/akka-actor/src/main/scala/akka/util/Collections.scala b/akka-actor/src/main/scala/akka/util/Collections.scala index e56faea859..f5d7468ac0 100644 --- a/akka-actor/src/main/scala/akka/util/Collections.scala +++ b/akka-actor/src/main/scala/akka/util/Collections.scala @@ -36,7 +36,7 @@ private[akka] object Collections { _next = apply(potentiallyNext) _hasNext = true true - } else tailrecHasNext() //Attempt to find the next + } else tailrecHasNext() // Attempt to find the next } else _hasNext // Return if we found one } diff --git a/akka-actor/src/main/scala/akka/util/FrequencySketch.scala b/akka-actor/src/main/scala/akka/util/FrequencySketch.scala index f944e29242..8930ff9b25 100644 --- a/akka-actor/src/main/scala/akka/util/FrequencySketch.scala +++ b/akka-actor/src/main/scala/akka/util/FrequencySketch.scala @@ -66,7 +66,7 @@ private[akka] object FrequencySketch { * INTERNAL API * * A frequency sketch for estimating the popularity of items. For implementing the TinyLFU cache admission policy. - + * * This is a generalised frequency sketch with configurable depth (number of hash functions) and counter size. * * The matrix of counters is a two-dimensional array of longs, which each hold multiple counters depending on the @@ -239,7 +239,7 @@ private[akka] object FastFrequencySketch { * INTERNAL API * * A faster implementation of the frequency sketch (around twice as fast). - + * * This frequency sketch uses a fixed depth (number of hash functions) of 4 and a counter size of 4 bits (0-15), * so that constants can be used for improved efficiency. It also uses its own rehashing of item hash codes. * @@ -301,8 +301,8 @@ private[akka] final class FastFrequencySketch[A](width: Int, resetSize: Int) { // https://github.com/skeeto/hash-prospector private def rehash(hash: Int): Int = { var x = hash - x = ((x >>> 15) ^ x) * 0xd168aaad - x = ((x >>> 15) ^ x) * 0xaf723597 + x = ((x >>> 15) ^ x) * 0xD168AAAD + x = ((x >>> 15) ^ x) * 0xAF723597 (x >>> 15) ^ x } diff --git a/akka-actor/src/main/scala/akka/util/HashCode.scala b/akka-actor/src/main/scala/akka/util/HashCode.scala index fb3c756199..3eace37756 100644 --- a/akka-actor/src/main/scala/akka/util/HashCode.scala +++ b/akka-actor/src/main/scala/akka/util/HashCode.scala @@ -4,7 +4,7 @@ package akka.util -import java.lang.{ Float => JFloat, Double => JDouble } +import java.lang.{ Double => JDouble, Float => JFloat } import java.lang.reflect.{ Array => JArray } /** diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index 323eb7c80d..94db0fadfe 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -32,7 +32,7 @@ object Helpers { * that the ordering is actually consistent and you cannot have a * sequence which cyclically is monotone without end. 
*/ - val diff = ((System.identityHashCode(a) & 0XFFFFFFFFL) - (System.identityHashCode(b) & 0XFFFFFFFFL)) + val diff = (System.identityHashCode(a) & 0xFFFFFFFFL) - (System.identityHashCode(b) & 0xFFFFFFFFL) if (diff > 0) 1 else if (diff < 0) -1 else 0 } diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index fd6938e3b8..a28180c810 100644 --- a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -32,7 +32,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { * @return true if the value didn't exist for the key previously, and false otherwise */ def put(key: K, value: V): Boolean = { - //Tailrecursive spin-locking put + // Tailrecursive spin-locking put @tailrec def spinPut(k: K, v: V): Boolean = { var retry = false @@ -41,8 +41,8 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { if (set ne null) { set.synchronized { - if (set.isEmpty) retry = true //IF the set is empty then it has been removed, so signal retry - else { //Else add the value to the set and signal that retry is not needed + if (set.isEmpty) retry = true // IF the set is empty then it has been removed, so signal retry + else { // Else add the value to the set and signal that retry is not needed added = set.add(v) retry = false } @@ -55,8 +55,8 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { val oldSet = container.putIfAbsent(k, newSet) if (oldSet ne null) { oldSet.synchronized { - if (oldSet.isEmpty) retry = true //IF the set is empty then it has been removed, so signal retry - else { //Else try to add the value to the set and signal that retry is not needed + if (oldSet.isEmpty) retry = true // IF the set is empty then it has been removed, so signal retry + else { // Else try to add the value to the set and signal that retry is not needed added = oldSet.add(v) retry = false } @@ -125,14 +125,14 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { if (set ne null) { set.synchronized { - if (set.remove(value)) { //If we can remove the value - if (set.isEmpty) //and the set becomes empty - container.remove(key, emptySet) //We try to remove the key if it's mapped to an empty set + if (set.remove(value)) { // If we can remove the value + if (set.isEmpty) // and the set becomes empty + container.remove(key, emptySet) // We try to remove the key if it's mapped to an empty set - true //Remove succeeded - } else false //Remove failed + true // Remove succeeded + } else false // Remove failed } - } else false //Remove failed + } else false // Remove failed } /** @@ -150,7 +150,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { set.clear() // Clear the original set to signal to any pending writers that there was a conflict Some(ret) } - } else None //Remove failed + } else None // Remove failed } /** @@ -164,9 +164,9 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { if (set ne null) { set.synchronized { - if (set.remove(value)) { //If we can remove the value - if (set.isEmpty) //and the set becomes empty - container.remove(e.getKey, emptySet) //We try to remove the key if it's mapped to an empty set + if (set.remove(value)) { // If we can remove the value + if (set.isEmpty) // and the set becomes empty + container.remove(e.getKey, emptySet) // We try to remove the key if it's mapped to an empty set } } } diff --git 
a/akka-actor/src/main/scala/akka/util/LineNumbers.scala b/akka-actor/src/main/scala/akka/util/LineNumbers.scala index 7298b01270..8a1d577a9b 100644 --- a/akka-actor/src/main/scala/akka/util/LineNumbers.scala +++ b/akka-actor/src/main/scala/akka/util/LineNumbers.scala @@ -217,7 +217,7 @@ object LineNumbers { private def skipID(d: DataInputStream): Unit = { val magic = d.readInt() if (debug) println(f"LNB: magic=0x$magic%08X") - if (magic != 0xcafebabe) throw new IllegalArgumentException("not a Java class file") + if (magic != 0xCAFEBABE) throw new IllegalArgumentException("not a Java class file") } private def skipVersion(d: DataInputStream): Unit = { diff --git a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala index 2ec8fafe5f..5a8e2b6069 100644 --- a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala +++ b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala @@ -134,9 +134,9 @@ final class ManifestInfo(val system: ExtendedActorSystem) extends Extension { } if (title != null - && version != null - && vendor != null - && knownVendors(vendor)) { + && version != null + && vendor != null + && knownVendors(vendor)) { manifests = manifests.updated(title, new Version(version)) } } finally { diff --git a/akka-actor/src/main/scala/akka/util/PrettyByteString.scala b/akka-actor/src/main/scala/akka/util/PrettyByteString.scala index 04c4c3a958..b8f8832453 100644 --- a/akka-actor/src/main/scala/akka/util/PrettyByteString.scala +++ b/akka-actor/src/main/scala/akka/util/PrettyByteString.scala @@ -19,7 +19,7 @@ private[akka] object PrettyByteString { def formatBytes(bs: ByteString, maxBytes: Int = 16 * 5): Iterator[String] = { def asHex(b: Byte): String = "%02X".format(b) def asASCII(b: Byte): Char = - if (b >= 0x20 && b < 0x7f) b.toChar + if (b >= 0x20 && b < 0x7F) b.toChar else '.' 
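For context on the `asHex`/`asASCII` helpers the PrettyByteString hunk touches: they feed a classic hex-dump layout, hex columns on the left and a printable-ASCII gutter on the right. A sketch of one such row, assuming a 16-byte line width (the real `formatLine` follows just below in the same file):

```scala
import akka.util.ByteString

// One hex-dump row in the style PrettyByteString produces. Padding the hex part
// to 16 * 3 - 1 = 47 columns keeps the ASCII gutter aligned across rows.
def dumpLine(bs: ByteString): String = {
  val bytes = bs.toArray
  val hex = bytes.map(b => "%02X".format(b)).mkString(" ")
  val ascii = bytes.map(b => if (b >= 0x20 && b < 0x7F) b.toChar else '.').mkString
  f"$hex%-47s  $ascii"
}
```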
def formatLine(bs: ByteString): String = { diff --git a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala index 4a4f98db4f..6a601bdf04 100644 --- a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala +++ b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala @@ -26,7 +26,7 @@ private[akka] object PrettyDuration { implicit class PrettyPrintableDuration(val duration: Duration) extends AnyVal { - /** Selects most appropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision **/ + /** Selects most appropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision * */ def pretty: String = pretty(includeNanos = false) /** Selects most appropriate TimeUnit for given duration and formats it accordingly */ diff --git a/akka-actor/src/main/scala/akka/util/UUIDComparator.scala b/akka-actor/src/main/scala/akka/util/UUIDComparator.scala index 167603fb9f..a252b16a04 100644 --- a/akka-actor/src/main/scala/akka/util/UUIDComparator.scala +++ b/akka-actor/src/main/scala/akka/util/UUIDComparator.scala @@ -54,9 +54,9 @@ class UUIDComparator extends Comparator[UUID] { * but if signs don't agree need to resolve differently */ if (i1 < 0) { - if (i2 < 0) (i1 - i2) else 1 + if (i2 < 0) i1 - i2 else 1 } else { - if (i2 < 0) -1 else (i1 - i2) + if (i2 < 0) -1 else i1 - i2 } } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala index 58c1f94f5c..657adb6301 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala @@ -37,7 +37,7 @@ class ActorBenchmark { @Param(Array("50")) var batchSize = 0 - //@Param(Array("akka.actor.ManyToOneArrayMailbox")) + // @Param(Array("akka.actor.ManyToOneArrayMailbox")) @Param( Array( "akka.dispatch.SingleConsumerOnlyUnboundedMailbox", diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala index 197ece0698..ad3636ffab 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala @@ -26,7 +26,7 @@ class AffinityPoolComparativeBenchmark { @Param(Array("affinity-dispatcher", "default-fj-dispatcher", "fixed-size-dispatcher")) var dispatcher = "" - @Param(Array("SingleConsumerOnlyUnboundedMailbox")) //"default" + @Param(Array("SingleConsumerOnlyUnboundedMailbox")) // "default" var mailbox = "" final val numThreads, numActors = 8 diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala index 0cddc4b931..41e2430d5c 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala @@ -26,7 +26,7 @@ class AffinityPoolRequestResponseBenchmark { @Param(Array("affinity-dispatcher", "default-fj-dispatcher", "fixed-size-dispatcher")) var dispatcher = "" - @Param(Array("SingleConsumerOnlyUnboundedMailbox")) //"default" + @Param(Array("SingleConsumerOnlyUnboundedMailbox")) // "default" var mailbox = "" final val numThreads, numActors = 8 diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala 
b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala index d2ca283744..57215eb6bf 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala @@ -83,9 +83,10 @@ class ScheduleBenchmark { val tryWithNext = (1 to to) .foldLeft(0.millis -> List[Cancellable]()) { case ((interv, c), idx) => - (interv + interval, scheduler.scheduleOnce(interv) { - op(idx) - } :: c) + (interv + interval, + scheduler.scheduleOnce(interv) { + op(idx) + } :: c) } ._2 promise.future.onComplete { diff --git a/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala index 1787d0ef06..5c5f0b64c2 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala @@ -112,7 +112,7 @@ object WorkPullingProducer { val requestNextAdapter = context.messageAdapter[WorkPullingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") + .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") Behaviors.receiveMessagePartial { case WrappedRequestNext(next) => diff --git a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala index 4c81f36680..c35924cff0 100644 --- a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala @@ -56,19 +56,19 @@ class VersionVectorBenchmark { } @Benchmark - def increment: VersionVector = (vv1 + nodeA) + def increment: VersionVector = vv1 + nodeA @Benchmark - def compareSame1: Boolean = (vv1 == dot1) + def compareSame1: Boolean = vv1 == dot1 @Benchmark - def compareSame2: Boolean = (vv2 == dot1) + def compareSame2: Boolean = vv2 == dot1 @Benchmark - def compareGreaterThan1: Boolean = (vv1 > dot1) + def compareGreaterThan1: Boolean = vv1 > dot1 @Benchmark - def compareGreaterThan2: Boolean = (vv2 > dot1) + def compareGreaterThan2: Boolean = vv2 > dot1 @Benchmark def merge: VersionVector = vv1.merge(vv2) diff --git a/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala index ce9702ffa4..320861b29b 100644 --- a/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala @@ -44,11 +44,11 @@ mailbox { """).withFallback(ConfigFactory.load()) implicit val sys: ActorSystem = ActorSystem("ANQ", config) val ref = sys.actorOf(Props(new Actor { - def receive = { - case Stop => sender() ! Stop - case _ => - } - }).withDispatcher("dispatcher").withMailbox("mailbox"), "receiver") + def receive = { + case Stop => sender() ! 
Stop + case _ => + } + }).withDispatcher("dispatcher").withMailbox("mailbox"), "receiver") @TearDown def teardown(): Unit = Await.result(sys.terminate(), 5.seconds) diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala index 26e452a19a..e1aec25af5 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala @@ -108,7 +108,7 @@ class NoPersistPersistentActorWithAtLeastOnceDelivery( case n: Int => deliver(downStream)(deliveryId => Msg(deliveryId, n)) if (n == respondAfter) - //switch to wait all message confirmed + // switch to wait all message confirmed context.become(waitConfirm) case Confirm(deliveryId) => confirmDelivery(deliveryId) @@ -146,7 +146,7 @@ class PersistPersistentActorWithAtLeastOnceDelivery( persist(MsgSent(n)) { _ => deliver(downStream)(deliveryId => Msg(deliveryId, n)) if (n == respondAfter) - //switch to wait all message confirmed + // switch to wait all message confirmed context.become(waitConfirm) } case Confirm(deliveryId) => @@ -185,7 +185,7 @@ class PersistAsyncPersistentActorWithAtLeastOnceDelivery( persistAsync(MsgSent(n)) { _ => deliver(downStream)(deliveryId => Msg(deliveryId, n)) if (n == respondAfter) - //switch to wait all message confirmed + // switch to wait all message confirmed context.become(waitConfirm) } case Confirm(deliveryId) => @@ -227,7 +227,7 @@ class DestinationActor extends Actor { case Msg(deliveryId, _) => seqNr += 1 if (seqNr % 11 == 0) { - //drop it + // drop it } else { sender() ! Confirm(deliveryId) } diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala index e640fbe2ce..a4fa1099b1 100644 --- a/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala @@ -63,14 +63,13 @@ class SerializationFormatCacheBenchmark { @Setup def init(): Unit = { system = ActorSystem("SerializationFormatCacheBenchmark") - temporaryActorRefs = Array.tabulate(uniqueTemporaryRefs)( - n => - new PromiseActorRef( - system.asInstanceOf[ExtendedActorSystem].provider, - Promise(), - "Any", - // request path is encoded in this string - s"_user_region_shard${n % 100}_entitypretendid${n}")) + temporaryActorRefs = Array.tabulate(uniqueTemporaryRefs)(n => + new PromiseActorRef( + system.asInstanceOf[ExtendedActorSystem].provider, + Promise(), + "Any", + // request path is encoded in this string + s"_user_region_shard${n % 100}_entitypretendid${n}")) topLevelActorRefs = Array.tabulate(uniqueTopLevelRefs)(n => system.actorOf(Props.empty, s"actor_$n")) } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala index 2e2a6a3ade..979b03f4d6 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala @@ -39,5 +39,5 @@ class EmptySourceBenchmark { Rewrite to GraphStage: [info] EmptySourceBenchmark.empty thrpt 10 17.556 ± 2.865 ops/ms - */ + */ } diff --git 
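In the at-least-once-delivery benchmarks above, DestinationActor deliberately drops every 11th confirmation so the sender's redelivery path is exercised. A hedged, self-contained sketch of that destination; Msg and Confirm mirror the benchmark's own message types:

import akka.actor.Actor

final case class Msg(deliveryId: Long, n: Int)
final case class Confirm(deliveryId: Long)

class FlakyDestination extends Actor {
  private var seqNr = 0L
  def receive = {
    case Msg(deliveryId, _) =>
      seqNr += 1
      if (seqNr % 11 == 0) () // drop it: no Confirm, so the sender must redeliver
      else sender() ! Confirm(deliveryId)
  }
}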
a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala index 378d9eb890..076f30eaee 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala @@ -38,7 +38,7 @@ class InterpreterBenchmark { val b = builder(identities: _*).connect(source, identities.head.in).connect(identities.last.out, sink) // FIXME: This should not be here, this is pure setup overhead - for (i <- (0 until identities.size - 1)) { + for (i <- 0 until identities.size - 1) { b.connect(identities(i).out, identities(i + 1).in) } @@ -77,13 +77,14 @@ object InterpreterBenchmark { override val in: akka.stream.Inlet[T] = Inlet[T]("in") in.id = 0 - setHandler(in, new InHandler { - override def onPush(): Unit = { - expected -= 1 - if (expected > 0) pull(in) - // Otherwise do nothing, it will exit the interpreter - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + expected -= 1 + if (expected > 0) pull(in) + // Otherwise do nothing, it will exit the interpreter + } + }) def requestOne(): Unit = pull(in) } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala index 71c28a2c77..5a3f8e82dc 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala @@ -35,13 +35,13 @@ object MaterializationBenchmark { var outlet = broadcast.out(0) for (i <- 1 until numOfJunctions) { val merge = b.add(Merge[Unit](2)) - outlet ~> merge + outlet ~> merge broadcast.out(i) ~> merge outlet = merge.out } Source.single(()) ~> broadcast - outlet ~> Sink.ignore + outlet ~> Sink.ignore ClosedShape }) @@ -56,7 +56,7 @@ object MaterializationBenchmark { } Source.single(()) ~> broadcast - merge ~> Sink.ignore + merge ~> Sink.ignore ClosedShape }) diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala index 9fc591963f..7c2949c9b7 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala @@ -197,7 +197,6 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { if (m.status == MemberStatus.Up || m.status == MemberStatus.WeaklyUp) addMember(m) case _: MemberEvent => // not interested in other types of MemberEvent - } override def postStop(): Unit = { @@ -225,7 +224,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`. 
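The InterpreterBenchmark sink above drives the stage interpreter directly; the same onPush/pull loop looks like this when written against the public GraphStage API. A minimal sketch, assuming a sink-shaped stage that consumes exactly `expected` elements and then completes:

import akka.stream.{ Attributes, Inlet, SinkShape }
import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler }

final class TakeCountSink[T](expected: Int) extends GraphStage[SinkShape[T]] {
  val in: Inlet[T] = Inlet[T]("TakeCountSink.in")
  override val shape: SinkShape[T] = SinkShape(in)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      private var remaining = expected
      override def preStart(): Unit = pull(in)
      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          grab(in) // consume the element
          remaining -= 1
          if (remaining > 0) pull(in) else completeStage()
        }
      })
    }
}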
*/ def receiveState(state: CurrentClusterState): Unit = - nodes = (state.members.diff(state.unreachable)).collect { + nodes = state.members.diff(state.unreachable).collect { case m if m.status == MemberStatus.Up || m.status == MemberStatus.WeaklyUp => m.address } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala index 15cb014afd..2541ad8c1d 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala @@ -54,7 +54,7 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension { .getOrElse { val log: LoggingAdapter = Logging(system, classOf[ClusterMetricsExtension]) log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[ - ClusterMetricsStrategy].getName}.") + ClusterMetricsStrategy].getName}.") new ClusterMetricsStrategy(SupervisorStrategyConfiguration) } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala index 6849d3b0ee..8c0dad2b43 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala @@ -405,14 +405,14 @@ object MetricsSelector { val args = List(classOf[Config] -> config) dynamicAccess .createInstanceFor[MetricsSelector](fqn, args) - .recover({ + .recover { case exception => throw new IllegalArgumentException( - (s"Cannot instantiate metrics-selector [$fqn], " + + s"Cannot instantiate metrics-selector [$fqn], " + "make sure it extends [akka.cluster.routing.MetricsSelector] and " + - "has constructor with [com.typesafe.config.Config] parameter"), + "has constructor with [com.typesafe.config.Config] parameter", exception) - }) + } .get } } @@ -458,7 +458,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector { val (_, min) = capacity.minBy { case (_, c) => c } // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero val divisor = math.max(0.01, min) - capacity.map { case (address, c) => (address -> math.round((c) / divisor).toInt) } + capacity.map { case (address, c) => address -> math.round(c / divisor).toInt } } } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala index 026a395864..76fa25c2dc 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala @@ -23,7 +23,6 @@ import scala.concurrent.duration.FiniteDuration * @param value the current exponentially weighted moving average, e.g. Y(n - 1), or, * the sampled value resulting from the previous smoothing iteration. * This value is always used as the previous EWMA to calculate the new EWMA. 
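The weights calculation reformatted above is compact but worth unpacking: the node with the lowest capacity gets weight 1 (clamped at 1% to avoid division by zero) and every other node is weighted proportionally. The same arithmetic extracted into a runnable sketch, with plain String keys standing in for Address:

object CapacityWeights {
  def weights(capacity: Map[String, Double]): Map[String, Int] =
    if (capacity.isEmpty) Map.empty
    else {
      val (_, min) = capacity.minBy { case (_, c) => c }
      // lowest usable capacity is 1% (>= 0.5% rounds to weight 1), avoids div by zero
      val divisor = math.max(0.01, min)
      capacity.map { case (address, c) => address -> math.round(c / divisor).toInt }
    }

  // weights(Map("a" -> 0.8, "b" -> 0.2)) == Map("a" -> 4, "b" -> 1)
}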
- * */ @SerialVersionUID(1L) final case class EWMA(value: Double, alpha: Double) { diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala index ef18fdddbc..c2f0d88c2e 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala @@ -38,7 +38,8 @@ final case class Metric private[metrics] (name: String, value: Number, average: case Some(avg) => copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue)) case None if latest.average.isDefined => copy(value = latest.value, average = latest.average) case _ => copy(value = latest.value) - } else this + } + else this /** * The numerical value of the average, if defined, otherwise the latest value diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala index 36645636fe..454fa052c2 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala @@ -67,9 +67,9 @@ trait SigarProvider { TryNative { verifiedSigarInstance }.orElse(TryNative { - provisionSigarLibrary() - verifiedSigarInstance - }) + provisionSigarLibrary() + verifiedSigarInstance + }) .recover { case e: Throwable => throw new RuntimeException("Failed to load sigar:", e) } get diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala index 2b38d31716..d0c9e6f33b 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala @@ -305,7 +305,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS val mm = cm.MixMetricsSelector.parseFrom(bytes) MixMetricsSelector( mm.getSelectorsList.asScala - // should be safe because we serialized only the right subtypes of MetricsSelector + // should be safe because we serialized only the right subtypes of MetricsSelector .map(s => metricSelectorFromProto(s).asInstanceOf[CapacityMetricsSelector]) .toIndexedSeq) } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala index 393942e767..fa9e5f326b 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala @@ -108,10 +108,10 @@ abstract class ClusterMetricsEnabledSpec enterBarrier("cluster-started") awaitAssert(clusterView.members.count(_.status == MemberStatus.Up) should ===(roles.size)) // TODO ensure same contract - //awaitAssert(clusterView.clusterMetrics.size should ===(roles.size)) + // awaitAssert(clusterView.clusterMetrics.size should ===(roles.size)) awaitAssert(metricsView.clusterMetrics.size should ===(roles.size)) val collector = MetricsCollector(cluster.system) - collector.sample().metrics.size should be > (3) + collector.sample().metrics.size should be > 3 enterBarrier("after") } "reflect the correct number of node metrics in cluster view" in within(30 seconds) { @@ -122,7 +122,7 @@ 
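The EWMA scaladoc trimmed above describes the usual exponential smoothing recurrence. As a worked sketch (a simplified stand-in for the type in this file, not its full implementation):

// Y(n) = alpha * X(n) + (1 - alpha) * Y(n - 1)
final case class Ewma(value: Double, alpha: Double) {
  require(0.0 <= alpha && alpha <= 1.0, "alpha must be between 0.0 and 1.0")
  def :+(xn: Double): Ewma = copy(value = alpha * xn + (1 - alpha) * value)
}
// Ewma(10.0, alpha = 0.5) :+ 20.0 == Ewma(15.0, 0.5)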
abstract class ClusterMetricsEnabledSpec runOn(node2, node3, node4, node5) { markNodeAsUnavailable(node1) // TODO ensure same contract - //awaitAssert(clusterView.clusterMetrics.size should ===(roles.size - 1)) + // awaitAssert(clusterView.clusterMetrics.size should ===(roles.size - 1)) awaitAssert(metricsView.clusterMetrics.size should ===(roles.size - 1)) } enterBarrier("finished") @@ -146,12 +146,12 @@ abstract class ClusterMetricsDisabledSpec "not collect metrics, not publish metrics events, and not gossip metrics" in { awaitClusterUp(roles: _*) // TODO ensure same contract - //clusterView.clusterMetrics.size should ===(0) + // clusterView.clusterMetrics.size should ===(0) metricsView.clusterMetrics.size should ===(0) ClusterMetricsExtension(system).subscribe(testActor) expectNoMessage() // TODO ensure same contract - //clusterView.clusterMetrics.size should ===(0) + // clusterView.clusterMetrics.size should ===(0) metricsView.clusterMetrics.size should ===(0) enterBarrier("after") } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index 99a18b9d23..819d4b2703 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -133,9 +133,9 @@ abstract class AdaptiveLoadBalancingRouterSpec def receiveReplies(expectedReplies: Int): Map[Address, Int] = { val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) - (receiveWhile(5 seconds, messages = expectedReplies) { + receiveWhile(5 seconds, messages = expectedReplies) { case Reply(address) => address - }).foldLeft(zero) { + }.foldLeft(zero) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } } @@ -188,9 +188,9 @@ abstract class AdaptiveLoadBalancingRouterSpec val replies = receiveReplies(iterationCount) - replies(node1) should be > (0) - replies(node2) should be > (0) - replies(node3) should be > (0) + replies(node1) should be > 0 + replies(node2) should be > 0 + replies(node3) should be > 0 replies.values.sum should ===(iterationCount) } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala index bed0c96b95..f438b141db 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala @@ -88,24 +88,24 @@ abstract class StatsSampleSpec override def afterAll() = multiNodeSpecAfterAll() - //#abstract-test + // #abstract-test "The stats sample" must { - //#startup-cluster + // #startup-cluster "illustrate how to startup cluster" in within(15 seconds) { Cluster(system).subscribe(testActor, classOf[MemberUp]) expectMsgClass(classOf[CurrentClusterState]) - //#addresses + // #addresses val firstAddress = node(first).address val secondAddress = node(second).address val thirdAddress = node(third).address - //#addresses + // #addresses - //#join + // #join Cluster(system).join(firstAddress) - //#join + // #join system.actorOf(Props[StatsWorker](), "statsWorker") system.actorOf(Props[StatsService](), "statsService") @@ -117,9 +117,9 @@ abstract class StatsSampleSpec testConductor.enter("all-up") } - //#startup-cluster + // #startup-cluster - 
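receiveReplies above is a frequency count over reply addresses, seeded with zero for every role so that nodes with no replies still appear in the map. The generic shape of that fold:

object Tally {
  def frequencies[A](as: Seq[A]): Map[A, Int] =
    as.foldLeft(Map.empty[A, Int].withDefaultValue(0)) { (acc, a) => acc + (a -> (acc(a) + 1)) }

  // frequencies(Seq("n1", "n2", "n1")) == Map("n1" -> 2, "n2" -> 1)
}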
//#test-statsService + // #test-statsService "show usage of the statsService from one node" in within(15 seconds) { runOn(second) { assertServiceOk() @@ -138,7 +138,7 @@ abstract class StatsSampleSpec } } - //#test-statsService + // #test-statsService "show usage of the statsService from all nodes" in within(15 seconds) { assertServiceOk() diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala index d7404e2a55..7dbdb2a61b 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala @@ -50,7 +50,7 @@ class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor { // not used, only for documentation abstract class StatsService2 extends Actor { - //#router-lookup-in-code + // #router-lookup-in-code import akka.cluster.routing.{ ClusterRouterGroup, ClusterRouterGroupSettings } import akka.routing.ConsistentHashingGroup @@ -63,12 +63,12 @@ abstract class StatsService2 extends Actor { allowLocalRoutees = true, useRoles = Set("compute"))).props(), name = "workerRouter2") - //#router-lookup-in-code + // #router-lookup-in-code } // not used, only for documentation abstract class StatsService3 extends Actor { - //#router-deploy-in-code + // #router-deploy-in-code import akka.cluster.routing.{ ClusterRouterPool, ClusterRouterPoolSettings } import akka.routing.ConsistentHashingPool @@ -78,5 +78,5 @@ abstract class StatsService3 extends Actor { ClusterRouterPoolSettings(totalInstances = 100, maxInstancesPerNode = 3, allowLocalRoutees = false)) .props(Props[StatsWorker]()), name = "workerRouter3") - //#router-deploy-in-code + // #router-deploy-in-code } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala index b52750259f..7995a6db77 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala @@ -77,8 +77,8 @@ class ClusterMetricsExtensionSpec case (mockMetrics, expectedData) => (mockMetrics, expectedData) match { case ( - Cpu(_, _, loadAverageMock, cpuCombinedMock, cpuStolenMock, _), - (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) => + Cpu(_, _, loadAverageMock, cpuCombinedMock, cpuStolenMock, _), + (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) => loadAverageMock.get should ===(loadAverageEwma +- epsilon) cpuCombinedMock.get should ===(cpuCombinedEwma +- epsilon) cpuStolenMock.get should ===(cpuStolenEwma +- epsilon) diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index 3d5afc05ae..62a33ba6ab 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -124,7 +124,7 @@ class MetricsSelectorSpec extends AnyWordSpec with Matchers { capacity(a1) should ===((0.75 + 0.67 + 0.9375) / 3 +- 0.0001) capacity(b1) should ===((0.75 + 0.34 + 0.9375) / 3 +- 0.0001) capacity(c1) should ===((0.0 + 0.01 + 0.0) / 3 +- 0.0001) - capacity(d1) should ===((0.001953125) / 1 +- 
0.0001) + capacity(d1) should ===(0.001953125 / 1 +- 0.0001) } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala index 79bac1813a..ac44f6c487 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala @@ -65,15 +65,15 @@ class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollec "calculate sane alpha from short half-life" in { val alpha = EWMA.alpha(1.millis, 3.seconds) - alpha should be <= (1.0) - alpha should be >= (0.0) + alpha should be <= 1.0 + alpha should be >= 0.0 alpha should ===(1.0 +- 0.001) } "calculate sane alpha from long half-life" in { val alpha = EWMA.alpha(1.day, 3.seconds) - alpha should be <= (1.0) - alpha should be >= (0.0) + alpha should be <= 1.0 + alpha should be >= 0.0 alpha should ===(0.0 +- 0.001) } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala index 88c9088508..384cdbd12e 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala @@ -58,8 +58,8 @@ class MetricsCollectorSpec "collect accurate metrics for a node" in { val sample = collector.sample() - val metrics = sample.metrics.collect { case m => (m.name, m.value) } - val used = metrics.collectFirst { case (HeapMemoryUsed, b) => b } + val metrics = sample.metrics.collect { case m => (m.name, m.value) } + val used = metrics.collectFirst { case (HeapMemoryUsed, b) => b } val committed = metrics.collectFirst { case (HeapMemoryCommitted, b) => b } metrics.foreach { case (SystemLoadAverage, b) => b.doubleValue should be >= 0.0 diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala index 5f8d8ba67b..9b3afce6ef 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala @@ -107,7 +107,7 @@ trait MetricsCollectorFactory { this: AkkaSpec => def createMetricsCollector: MetricsCollector = try { new SigarMetricsCollector(selfAddress, defaultDecayFactor, new Sigar()) - //new SigarMetricsCollector(selfAddress, defaultDecayFactor, SimpleSigarProvider().createSigarInstance) + // new SigarMetricsCollector(selfAddress, defaultDecayFactor, SimpleSigarProvider().createSigarInstance) } catch { case e: Throwable => log.warning("Sigar failed to load. Using JMX. 
Reason: " + e.toString) @@ -134,7 +134,6 @@ trait MetricsCollectorFactory { this: AkkaSpec => } /** - * */ class MockitoSigarMetricsCollector(system: ActorSystem) extends SigarMetricsCollector( diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala index b56efcd619..bbce7d27c4 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala @@ -91,14 +91,14 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" val weighted = new WeightedRoutees(routees2, a1, weights) (1 to 2).foreach { weighted(_) should ===(testActorRoutee) } - (3 to weighted.total).foreach { weighted(_) should not be (testActorRoutee) } + (3 to weighted.total).foreach { weighted(_) should not be testActorRoutee } } "not allocate ref with weight zero" in { val weights = Map(a1 -> 0, b1 -> 2, c1 -> 10) val weighted = new WeightedRoutees(routees, a1, weights) - (1 to weighted.total).foreach { weighted(_) should not be (routeeA) } + (1 to weighted.total).foreach { weighted(_) should not be routeeA } } } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala index 6fe9b80110..1087f1df18 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala @@ -51,13 +51,15 @@ object ReplicatedEntityProvider { */ def apply[M: ClassTag](typeName: String, allReplicaIds: Set[ReplicaId])( settingsPerReplicaFactory: (EntityTypeKey[M], ReplicaId) => ReplicatedEntity[M]): ReplicatedEntityProvider[M] = { - new ReplicatedEntityProvider(allReplicaIds.map { replicaId => - if (typeName.contains(Separator)) - throw new IllegalArgumentException(s"typeName [$typeName] contains [$Separator] which is a reserved character") + new ReplicatedEntityProvider( + allReplicaIds.map { replicaId => + if (typeName.contains(Separator)) + throw new IllegalArgumentException( + s"typeName [$typeName] contains [$Separator] which is a reserved character") - val typeKey = EntityTypeKey[M](s"$typeName${Separator}${replicaId.id}") - (settingsPerReplicaFactory(typeKey, replicaId), typeName) - }.toVector, directReplication = true) + val typeKey = EntityTypeKey[M](s"$typeName${Separator}${replicaId.id}") + (settingsPerReplicaFactory(typeKey, replicaId), typeName) + }.toVector, directReplication = true) } /** @@ -69,9 +71,10 @@ object ReplicatedEntityProvider { def perDataCenter[M: ClassTag, E](typeName: String, allReplicaIds: Set[ReplicaId])( create: ReplicationId => Behavior[M]): ReplicatedEntityProvider[M] = { apply(typeName, allReplicaIds) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(typeKey) { entityContext => - create(ReplicationId.fromString(entityContext.entityId)) - }.withDataCenter(replicaId.id)) + ReplicatedEntity(replicaId, + Entity(typeKey) { entityContext => + create(ReplicationId.fromString(entityContext.entityId)) + }.withDataCenter(replicaId.id)) } } @@ -85,9 +88,10 @@ object ReplicatedEntityProvider { def perRole[M: ClassTag, E](typeName: String, allReplicaIds: Set[ReplicaId])( create: ReplicationId => Behavior[M]): ReplicatedEntityProvider[M] = { 
apply(typeName, allReplicaIds) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(typeKey) { entityContext => - create(ReplicationId.fromString(entityContext.entityId)) - }.withRole(replicaId.id)) + ReplicatedEntity(replicaId, + Entity(typeKey) { entityContext => + create(ReplicationId.fromString(entityContext.entityId)) + }.withRole(replicaId.id)) } } @@ -104,9 +108,10 @@ object ReplicatedEntityProvider { createBehavior: java.util.function.Function[ReplicationId, Behavior[M]]): ReplicatedEntityProvider[M] = { implicit val classTag: ClassTag[M] = ClassTag(messageClass) apply(typeName, allReplicaIds.asScala.toSet) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(typeKey) { entityContext => - createBehavior(ReplicationId.fromString(entityContext.entityId)) - }.withDataCenter(replicaId.id)) + ReplicatedEntity(replicaId, + Entity(typeKey) { entityContext => + createBehavior(ReplicationId.fromString(entityContext.entityId)) + }.withDataCenter(replicaId.id)) } } @@ -125,15 +130,15 @@ object ReplicatedEntityProvider { createBehavior: akka.japi.function.Function[ReplicationId, Behavior[M]]): ReplicatedEntityProvider[M] = { implicit val classTag: ClassTag[M] = ClassTag(messageClass) apply(typeName, allReplicaIds.asScala.toSet) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(typeKey) { entityContext => - createBehavior(ReplicationId.fromString(entityContext.entityId)) - }.withRole(replicaId.id)) + ReplicatedEntity(replicaId, + Entity(typeKey) { entityContext => + createBehavior(ReplicationId.fromString(entityContext.entityId)) + }.withRole(replicaId.id)) } } } /** - * * @tparam M The type of messages the replicated entity accepts */ final class ReplicatedEntityProvider[M] private ( @@ -145,7 +150,6 @@ final class ReplicatedEntityProvider[M] private ( * to also have it enabled through [[akka.persistence.typed.scaladsl.EventSourcedBehavior.withEventPublishing]] * or [[akka.persistence.typed.javadsl.ReplicatedEventSourcedBehavior.withEventPublishing]] * to work. 
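The perDataCenter/perRole factories reindented above hide the per-replica Entity wiring: each replica gets its own type key built from the type name, the reserved separator, and the replica id. A usage sketch under assumed names (Command and myBehavior are hypothetical stand-ins):

import akka.actor.typed.Behavior
import akka.cluster.sharding.typed.ReplicatedEntityProvider
import akka.persistence.typed.{ ReplicaId, ReplicationId }

object ReplicatedProviderUsage {
  trait Command
  def myBehavior(id: ReplicationId): Behavior[Command] = ???

  // one replicated entity per data center, with direct replication left enabled
  val provider: ReplicatedEntityProvider[Command] =
    ReplicatedEntityProvider.perDataCenter("MyEntityType", Set(ReplicaId("dc-a"), ReplicaId("dc-b"))) {
      replicationId => myBehavior(replicationId)
    }
}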
- * */ def withDirectReplication(enabled: Boolean): ReplicatedEntityProvider[M] = new ReplicatedEntityProvider(replicas, directReplication = enabled) diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala index d8838ebd7d..8c7a3aebc0 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala @@ -53,7 +53,7 @@ import akka.util.JavaDurationConverters._ extends ShardingMessageExtractor[Any, M] { override def entityId(message: Any): String = { message match { - case ShardingEnvelope(entityId, _) => entityId //also covers ClassicStartEntity in ShardingEnvelope + case ShardingEnvelope(entityId, _) => entityId // also covers ClassicStartEntity in ShardingEnvelope case ClassicStartEntity(entityId) => entityId case msg => delegate.entityId(msg.asInstanceOf[E]) } @@ -64,7 +64,7 @@ import akka.util.JavaDurationConverters._ override def unwrapMessage(message: Any): M = { message match { case ShardingEnvelope(_, msg: M @unchecked) => - //also covers ClassicStartEntity in ShardingEnvelope + // also covers ClassicStartEntity in ShardingEnvelope msg case msg: ClassicStartEntity => // not really of type M, but erased and StartEntity is only handled internally, not delivered to the entity diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala index 4541f7fe66..7bccc73471 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala @@ -29,20 +29,21 @@ import akka.annotation.InternalApi */ @InternalApi private[sharding] object Murmur2 { - def toPositive(number: Int): Int = number & 0x7fffffff + def toPositive(number: Int): Int = number & 0x7FFFFFFF def murmur2(data: Array[Byte]) = { val length = data.length - val seed = 0x9747b28c + val seed = 0x9747B28C // 'm' and 'r' are mixing constants generated offline. // They're not really 'magic', they just happen to work well. 
- val m = 0x5bd1e995 + val m = 0x5BD1E995 val r = 24 // Initialize the hash to a random value var h = seed ^ length val length4 = length / 4 for (i <- 0 until length4) { val i4 = i * 4 - var k = (data(i4 + 0) & 0xff) + ((data(i4 + 1) & 0xff) << 8) + ((data(i4 + 2) & 0xff) << 16) + ((data(i4 + 3) & 0xff) << 24) + var k = (data(i4 + 0) & 0xFF) + ((data(i4 + 1) & 0xFF) << 8) + ((data(i4 + 2) & 0xFF) << 16) + ((data( + i4 + 3) & 0xFF) << 24) k *= m k ^= k >>> r k *= m @@ -52,16 +53,16 @@ private[sharding] object Murmur2 { // Handle the last few bytes of the input array length % 4 match { case 3 => - h ^= (data((length & ~3) + 2) & 0xff) << 16 - h ^= (data((length & ~3) + 1) & 0xff) << 8 - h ^= data(length & ~3) & 0xff + h ^= (data((length & ~3) + 2) & 0xFF) << 16 + h ^= (data((length & ~3) + 1) & 0xFF) << 8 + h ^= data(length & ~3) & 0xFF h *= m case 2 => - h ^= (data((length & ~3) + 1) & 0xff) << 8 - h ^= data(length & ~3) & 0xff + h ^= (data((length & ~3) + 1) & 0xFF) << 8 + h ^= data(length & ~3) & 0xFF h *= m case 1 => - h ^= data(length & ~3) & 0xff + h ^= data(length & ~3) & 0xFF h *= m case 0 => // fall through diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala index 7a79040014..bee8ee58f1 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala @@ -280,7 +280,6 @@ final class Entity[M, E] private ( copy(stopMessage = Optional.ofNullable(newStopMessage)) /** - * * If a `messageExtractor` is not specified the messages are sent to the entities by wrapping * them in [[ShardingEnvelope]] with the entityId of the recipient actor. That envelope * is used by the [[HashCodeMessageExtractor]] for extracting entityId and shardId. The number of diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala index 1f4041d8e7..0e4a9812a8 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala @@ -275,7 +275,6 @@ final class Entity[M, E] private[akka] ( copy(stopMessage = Option(newStopMessage)) /** - * * If a `messageExtractor` is not specified the messages are sent to the entities by wrapping * them in [[ShardingEnvelope]] with the entityId of the recipient actor. That envelope * is used by the [[HashCodeMessageExtractor]] for extracting entityId and shardId. 
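Murmur2 above is a Scala port of the hash behind Kafka's default partitioner, and the uppercased hex literals change no values (0xff == 0xFF). A sketch of how such a hash is typically turned into a shard id (illustrative, assuming access to the functions above; this is not the module's actual extractor):

object Murmur2ShardId {
  def shardId(entityId: String, numberOfShards: Int): String =
    (Murmur2.toPositive(Murmur2.murmur2(entityId.getBytes("utf-8"))) % numberOfShards).toString

  // same key -> same murmur2 value -> same shard, which is what can keep shard
  // placement aligned with Kafka partition placement for that key
}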
The number of diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala index f19c38fa27..495c9d3f44 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala @@ -170,8 +170,8 @@ abstract class ClusterShardingRememberEntitiesPerfSpec println(f"Average throughput: ${throughputs.sum / NrIterations}%,.0f msg/s") println("Combined latency figures:") println(s"total ${fullHistogram.getTotalCount} max ${fullHistogram.getMaxValue} ${percentiles - .map(p => s"$p% ${fullHistogram.getValueAtPercentile(p)}ms") - .mkString(" ")}") + .map(p => s"$p% ${fullHistogram.getValueAtPercentile(p)}ms") + .mkString(" ")}") recording.endAndDump(Paths.get("target", s"${name.replace(" ", "-")}.jfr")) } enterBarrier(s"after-start-stop-${testRun}") @@ -226,12 +226,12 @@ abstract class ClusterShardingRememberEntitiesPerfSpec } awaitAssert({ - val probe = TestProbe() - region.tell(GetShardRegionState, probe.ref) - val stats = probe.expectMsgType[CurrentShardRegionState] - stats.shards.head.shardId shouldEqual "0" - stats.shards.head.entityIds.toList.sorted shouldEqual List("0") // the init entity - }, 2.seconds) + val probe = TestProbe() + region.tell(GetShardRegionState, probe.ref) + val stats = probe.expectMsgType[CurrentShardRegionState] + stats.shards.head.shardId shouldEqual "0" + stats.shards.head.entityIds.toList.sorted shouldEqual List("0") // the init entity + }, 2.seconds) numberOfMessages } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala index 5d93a2f404..e423a15015 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala @@ -91,11 +91,11 @@ class ClusterShardingPreparingForShutdownSpec cluster.manager ! PrepareForFullClusterShutdown } awaitAssert({ - withClue("members: " + cluster.state.members) { - cluster.selfMember.status shouldEqual MemberStatus.ReadyForShutdown - cluster.state.members.unsorted.map(_.status) shouldEqual Set(MemberStatus.ReadyForShutdown) - } - }, 10.seconds) + withClue("members: " + cluster.state.members) { + cluster.selfMember.status shouldEqual MemberStatus.ReadyForShutdown + cluster.state.members.unsorted.map(_.status) shouldEqual Set(MemberStatus.ReadyForShutdown) + } + }, 10.seconds) enterBarrier("preparation-complete") shardRegion ! ShardingEnvelope("id2", Pinger.Ping(2, probe.ref)) @@ -104,26 +104,27 @@ class ClusterShardingPreparingForShutdownSpec runOn(second) { cluster.manager ! 
Leave(address(second)) } - awaitAssert({ - runOn(first, third) { - withClue("members: " + cluster.state.members) { - cluster.state.members.size shouldEqual 2 + awaitAssert( + { + runOn(first, third) { + withClue("members: " + cluster.state.members) { + cluster.state.members.size shouldEqual 2 + } } - } - runOn(second) { - withClue("self member: " + cluster.selfMember) { - cluster.selfMember.status shouldEqual MemberStatus.Removed + runOn(second) { + withClue("self member: " + cluster.selfMember) { + cluster.selfMember.status shouldEqual MemberStatus.Removed + } } - } - }, 5.seconds) // keep this lower than coordinated shutdown timeout + }, 5.seconds) // keep this lower than coordinated shutdown timeout // trigger creation of a new shard should be fine even though one node left runOn(first, third) { awaitAssert({ - shardRegion ! ShardingEnvelope("id3", Pinger.Ping(3, probe.ref)) - probe.expectMessage(Pong(3)) - }, 10.seconds) + shardRegion ! ShardingEnvelope("id3", Pinger.Ping(3, probe.ref)) + probe.expectMessage(Pong(3)) + }, 10.seconds) } enterBarrier("new-shards-verified") @@ -132,10 +133,10 @@ class ClusterShardingPreparingForShutdownSpec cluster.manager ! Leave(address(third)) } awaitAssert({ - withClue("self member: " + cluster.selfMember) { - cluster.selfMember.status shouldEqual Removed - } - }, 15.seconds) + withClue("self member: " + cluster.selfMember) { + cluster.selfMember.status shouldEqual Removed + } + }, 15.seconds) enterBarrier("done") } } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala index d40f2a82e7..cd6b558ca6 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala @@ -65,7 +65,7 @@ abstract class ClusterShardingStatsSpec private val typeKey = EntityTypeKey[Command]("ping") private val sharding = ClusterSharding(typedSystem) private val settings = ClusterShardingSettings(typedSystem) - private val queryTimeout = settings.shardRegionQueryTimeout * roles.size.toLong //numeric widening y'all + private val queryTimeout = settings.shardRegionQueryTimeout * roles.size.toLong // numeric widening y'all "Cluster sharding stats" must { "form cluster" in { diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala index 16ae746719..8b26f38e2e 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala @@ -109,10 +109,10 @@ abstract class MultiDcClusterShardingSpec "be able to message cross dc via proxy, defined with Entity" in { runOn(first, second) { val system = typedSystem - //#proxy-dc + // #proxy-dc val proxy: ActorRef[ShardingEnvelope[Command]] = ClusterSharding(system).init(Entity(typeKey)(_ => MultiDcPinger()).withDataCenter("dc2")) - //#proxy-dc + // #proxy-dc val probe = TestProbe[Pong]() proxy ! 
ShardingEnvelope(entityId, Ping(probe.ref)) probe.expectMessage(remainingOrDefault, Pong("dc2")) @@ -123,12 +123,12 @@ abstract class MultiDcClusterShardingSpec "be able to message cross dc via proxy, defined with EntityRef" in { runOn(first, second) { val system = typedSystem - //#proxy-dc-entityref + // #proxy-dc-entityref // it must still be started before usage ClusterSharding(system).init(Entity(typeKey)(_ => MultiDcPinger()).withDataCenter("dc2")) val entityRef = ClusterSharding(system).entityRefFor(typeKey, entityId, "dc2") - //#proxy-dc-entityref + // #proxy-dc-entityref val probe = TestProbe[Pong]() entityRef ! Ping(probe.ref) diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala index eff8eb1676..df7de3bc53 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala @@ -98,11 +98,12 @@ object ReplicatedShardingSpec extends MultiNodeConfig { def provider(): ReplicatedEntityProvider[Command] = { ReplicatedEntityProvider[Command]("TestRES", AllReplicas) { (entityTypeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(entityTypeKey) { entityContext => - Behaviors.setup { ctx => - TestRES(ReplicationId.fromString(entityContext.entityId), ctx) - } - }) + ReplicatedEntity(replicaId, + Entity(entityTypeKey) { entityContext => + Behaviors.setup { ctx => + TestRES(ReplicationId.fromString(entityContext.entityId), ctx) + } + }) }.withDirectReplication(true) // this is required as we don't have a shared read journal } } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala index ba8f626a68..944eb42cdf 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala @@ -84,9 +84,9 @@ abstract class ShardedDaemonProcessSpec enterBarrier("snitch-registered") probe.awaitAssert({ - typedSystem.receptionist ! Receptionist.Find(SnitchServiceKey, probe.ref) - probe.expectMessageType[Receptionist.Listing].serviceInstances(SnitchServiceKey).size should ===(1) - }, 5.seconds) + typedSystem.receptionist ! 
Receptionist.Find(SnitchServiceKey, probe.ref) + probe.expectMessageType[Receptionist.Listing].serviceInstances(SnitchServiceKey).size should ===(1) + }, 5.seconds) enterBarrier("snitch-seen") } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala index b2d662e2e3..5b53dade0f 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala @@ -166,7 +166,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { resultReporter: BenchmarkFileReporter): Unit = { val numberOfMessages = testSettings.totalMessages val took = NANOSECONDS.toMillis(System.nanoTime - startTime) - val throughput = (numberOfMessages * 1000.0 / took) + val throughput = numberOfMessages * 1000.0 / took resultReporter.reportResults( s"=== ${resultReporter.testName} ${testSettings.testName}: " + @@ -198,7 +198,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { context.messageAdapter[WorkPullingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var startTime = System.nanoTime() var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") + .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") Behaviors.receiveMessage { case WrappedRequestNext(next) => @@ -245,7 +245,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { context.messageAdapter[ShardingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var startTime = System.nanoTime() var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.sharding.consumer-controller.flow-control-window") + .getInt("akka.reliable-delivery.sharding.consumer-controller.flow-control-window") var latestDemand: ShardingProducerController.RequestNext[Consumer.Command] = null var messagesSentToEachEntity: Map[String, Long] = Map.empty[String, Long].withDefaultValue(0L) @@ -279,7 +279,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { remaining, latestDemand, messagesSentToEachEntity, - (remaining % testSettings.numberOfConsumers)) + remaining % testSettings.numberOfConsumers) Behaviors.same } } @@ -370,7 +370,7 @@ abstract class DeliveryThroughputSpec runPerfFlames(first, second, third)(delay = 5.seconds) runOn(second, third) { - val range = if (myself == second) (1 to numberOfConsumers by 2) else (2 to numberOfConsumers by 2) + val range = if (myself == second) 1 to numberOfConsumers by 2 else 2 to numberOfConsumers by 2 val consumers = range.map { n => val consumerController = spawn(ConsumerController[Consumer.Command](serviceKey(testName)), s"consumerController-$n-$testName") diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala index 12ca2b07e3..6192b18f15 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala @@ -316,7 +316,7 @@ class ClusterShardingSpec Behaviors.empty 
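The throughput line reformatted above compresses the whole measurement: a nanoTime interval converted to milliseconds, then messages per second. Spelled out with a worked number:

import java.util.concurrent.TimeUnit.NANOSECONDS

object Throughput {
  def throughput(numberOfMessages: Int, startNanos: Long, endNanos: Long): Double = {
    val tookMillis = NANOSECONDS.toMillis(endNanos - startNanos)
    numberOfMessages * 1000.0 / tookMillis
  }
  // 100000 messages in 2000 ms -> 50000.0 msg/s
}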
} - */ + */ } "EntityRef - AskTimeoutException" in { diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala index fdcc4d686f..9c052dfd4c 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala @@ -53,10 +53,10 @@ class ClusterShardingStateSpec shardingRef ! IdReplyPlz("id1", replyProbe.ref) replyProbe.expectMessage("Hello!") - //#get-region-state + // #get-region-state ClusterSharding(system).shardState ! GetShardRegionState(typeKey, probe.ref) val state = probe.receiveMessage() - //#get-region-state + // #get-region-state state.shards should be(Set(ShardState(shardExtractor.shardId("id1"), Set("id1")))) } } diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala index 09007794eb..0e95b19d89 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala @@ -69,8 +69,8 @@ class ShardedDaemonProcessSpec val probe = createTestProbe() Cluster(system).manager ! Join(Cluster(system).selfMember.address) probe.awaitAssert({ - Cluster(system).selfMember.status == MemberStatus.Up - }, 3.seconds) + Cluster(system).selfMember.status == MemberStatus.Up + }, 3.seconds) } "start N actors with unique ids" in { diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala index e7b628d74f..f9c497048d 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala @@ -22,7 +22,7 @@ import docs.akka.cluster.sharding.typed.AccountExampleWithEventHandlersInState.A //#testkit class AccountExampleDocSpec extends ScalaTestWithActorTestKit(EventSourcedBehaviorTestKit.config) - //#testkit + // #testkit with AnyWordSpecLike with BeforeAndAfterEach with LogCapturing { diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala index 6fdb8b3df2..2b8bf6c42c 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala @@ -22,17 +22,17 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithCommandHandlersInDurableState { - //#account-entity + // #account-entity object AccountEntity { // Command - //#reply-command + // #reply-command sealed trait Command extends CborSerializable - //#reply-command + // #reply-command final case class 
CreateAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command final case class Deposit(amount: BigDecimal, replyTo: ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command final case class CloseAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command @@ -85,7 +85,7 @@ object AccountExampleWithCommandHandlersInDurableState { balance - amount >= Zero } - //#reply + // #reply private def deposit(cmd: Deposit) = { Effect.persist(copy(balance = balance + cmd.amount)).thenReply(cmd.replyTo)(_ => StatusReply.Ack) } @@ -97,7 +97,7 @@ object AccountExampleWithCommandHandlersInDurableState { Effect.reply(cmd.replyTo)( StatusReply.Error(s"Insufficient balance ${balance} to be able to withdraw ${cmd.amount}")) } - //#reply + // #reply } case object ClosedAccount extends Account { @@ -123,13 +123,13 @@ object AccountExampleWithCommandHandlersInDurableState { val TypeKey: EntityTypeKey[Command] = EntityTypeKey[Command]("Account") - //#withEnforcedReplies + // #withEnforcedReplies def apply(persistenceId: PersistenceId): Behavior[Command] = { DurableStateBehavior .withEnforcedReplies[Command, Account](persistenceId, EmptyAccount, (state, cmd) => state.applyCommand(cmd)) } - //#withEnforcedReplies + // #withEnforcedReplies } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala index fe392daa6e..f4cd1dff46 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala @@ -23,7 +23,7 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithCommandHandlersInState { - //#account-entity + // #account-entity object AccountEntity { // Command sealed trait Command extends CborSerializable @@ -145,6 +145,6 @@ object AccountExampleWithCommandHandlersInState { } } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala index 44881fa930..d202ff1c38 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala @@ -25,17 +25,17 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithEventHandlersInState { - //#account-entity + // #account-entity object AccountEntity { // Command - //#reply-command + // #reply-command sealed trait Command extends CborSerializable - //#reply-command + // #reply-command final case class CreateAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command final case class Deposit(amount: BigDecimal, replyTo: ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class 
Withdraw(amount: BigDecimal, replyTo: ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command final case class CloseAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command @@ -90,11 +90,11 @@ object AccountExampleWithEventHandlersInState { // When filling in the parameters of EventSourcedBehavior.apply you can use IntelliJ alt+Enter > createValue // to generate the stub with types for the command and event handlers. - //#withEnforcedReplies + // #withEnforcedReplies def apply(accountNumber: String, persistenceId: PersistenceId): Behavior[Command] = { EventSourcedBehavior.withEnforcedReplies(persistenceId, EmptyAccount, commandHandler(accountNumber), eventHandler) } - //#withEnforcedReplies + // #withEnforcedReplies private def commandHandler(accountNumber: String): (Account, Command) => ReplyEffect[Event, Account] = { (state, cmd) => @@ -149,7 +149,7 @@ object AccountExampleWithEventHandlersInState { Effect.persist(Deposited(cmd.amount)).thenReply(cmd.replyTo)(_ => StatusReply.Ack) } - //#reply + // #reply private def withdraw(acc: OpenedAccount, cmd: Withdraw): ReplyEffect[Event, Account] = { if (acc.canWithdraw(cmd.amount)) Effect.persist(Withdrawn(cmd.amount)).thenReply(cmd.replyTo)(_ => StatusReply.Ack) @@ -157,7 +157,7 @@ object AccountExampleWithEventHandlersInState { Effect.reply(cmd.replyTo)( StatusReply.Error(s"Insufficient balance ${acc.balance} to be able to withdraw ${cmd.amount}")) } - //#reply + // #reply private def getBalance(acc: OpenedAccount, cmd: GetBalance): ReplyEffect[Event, Account] = { Effect.reply(cmd.replyTo)(CurrentBalance(acc.balance)) @@ -171,6 +171,6 @@ object AccountExampleWithEventHandlersInState { } } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala index 489a1b4cc5..7929b0c8c3 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala @@ -22,7 +22,7 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithOptionDurableState { - //#account-entity + // #account-entity object AccountEntity { // Command sealed trait Command extends CborSerializable @@ -121,6 +121,6 @@ object AccountExampleWithOptionDurableState { } } } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala index 998df93498..fd7facc5b3 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala @@ -23,7 +23,7 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithOptionState { - //#account-entity + // #account-entity object AccountEntity { // Command sealed trait Command extends CborSerializable @@ -154,6 +154,6 @@ object AccountExampleWithOptionState { } } - //#account-entity + // 
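withEnforcedReplies, doc-tagged above, moves "did we reply?" from runtime to the type system: the command handler must return a ReplyEffect, so a branch that neither replies nor calls Effect.noReply fails to compile instead of surfacing as an ask timeout. A stripped-down sketch with hypothetical Ping/Ack types rather than the account entity's:

import akka.Done
import akka.actor.typed.{ ActorRef, Behavior }
import akka.pattern.StatusReply
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }

object Pinger {
  final case class Ping(replyTo: ActorRef[StatusReply[Done]])

  def apply(id: String): Behavior[Ping] =
    EventSourcedBehavior.withEnforcedReplies[Ping, String, Int](
      persistenceId = PersistenceId.ofUniqueId(id),
      emptyState = 0,
      commandHandler = (_, cmd) =>
        // persisting then replying yields a ReplyEffect, satisfying the compiler
        Effect.persist("pinged").thenReply(cmd.replyTo)(_ => StatusReply.Ack),
      eventHandler = (count, _) => count + 1)
}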
#account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala index c3836a0c18..4afaacf7fd 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala @@ -13,7 +13,7 @@ import akka.stream.scaladsl.Source @nowarn object DurableStateStoreQueryUsageCompileOnlySpec { def getQuery[Record](system: ActorSystem, pluginId: String, offset: Offset) = { - //#get-durable-state-store-query-example + // #get-durable-state-store-query-example import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.query.scaladsl.DurableStateStoreQuery import akka.persistence.query.DurableStateChange @@ -26,6 +26,6 @@ object DurableStateStoreQueryUsageCompileOnlySpec { case UpdatedDurableState(persistenceId, revision, value, offset, timestamp) => Some(value) case _: DeletedDurableState[_] => None } - //#get-durable-state-store-query-example + // #get-durable-state-store-query-example } } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala index baacd6d4c7..615334a5ba 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala @@ -16,7 +16,7 @@ import akka.serialization.jackson.CborSerializable object HelloWorldPersistentEntityExample { - //#persistent-entity-usage + // #persistent-entity-usage import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.Entity import akka.util.Timeout @@ -40,9 +40,9 @@ object HelloWorldPersistentEntityExample { } } - //#persistent-entity-usage + // #persistent-entity-usage - //#persistent-entity + // #persistent-entity import akka.actor.typed.Behavior import akka.cluster.sharding.typed.scaladsl.EntityTypeKey import akka.persistence.typed.scaladsl.Effect @@ -89,6 +89,6 @@ object HelloWorldPersistentEntityExample { } } - //#persistent-entity + // #persistent-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala index 2a15a125a8..5f9fba6f38 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala @@ -27,35 +27,36 @@ object ReplicatedShardingCompileOnlySpec { def apply(replicationId: ReplicationId): Behavior[Command] = ??? 
} - //#bootstrap + // #bootstrap ReplicatedEntityProvider[Command]("MyEntityType", Set(ReplicaId("DC-A"), ReplicaId("DC-B"))) { (entityTypeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(entityTypeKey) { entityContext => - // the sharding entity id contains the business entityId, entityType, and replica id - // which you'll need to create a ReplicatedEventSourcedBehavior - val replicationId = ReplicationId.fromString(entityContext.entityId) - MyEventSourcedBehavior(replicationId) - }) + ReplicatedEntity(replicaId, + Entity(entityTypeKey) { entityContext => + // the sharding entity id contains the business entityId, entityType, and replica id + // which you'll need to create a ReplicatedEventSourcedBehavior + val replicationId = ReplicationId.fromString(entityContext.entityId) + MyEventSourcedBehavior(replicationId) + }) } - //#bootstrap + // #bootstrap - //#bootstrap-dc + // #bootstrap-dc ReplicatedEntityProvider.perDataCenter("MyEntityType", Set(ReplicaId("DC-A"), ReplicaId("DC-B"))) { replicationId => MyEventSourcedBehavior(replicationId) } - //#bootstrap-dc + // #bootstrap-dc - //#bootstrap-role + // #bootstrap-role val provider = ReplicatedEntityProvider.perRole("MyEntityType", Set(ReplicaId("DC-A"), ReplicaId("DC-B"))) { replicationId => MyEventSourcedBehavior(replicationId) } - //#bootstrap-role + // #bootstrap-role - //#sending-messages + // #sending-messages val myReplicatedSharding: ReplicatedSharding[Command] = ReplicatedShardingExtension(system).init(provider) val entityRefs: Map[ReplicaId, EntityRef[Command]] = myReplicatedSharding.entityRefsFor("myEntityId") - //#sending-messages + // #sending-messages } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala index 4bad30329a..f25f477ad3 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala @@ -24,16 +24,16 @@ object ShardingCompileOnlySpec { object Basics { - //#sharding-extension + // #sharding-extension import akka.cluster.sharding.typed.ShardingEnvelope import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.EntityTypeKey import akka.cluster.sharding.typed.scaladsl.EntityRef val sharding = ClusterSharding(system) - //#sharding-extension + // #sharding-extension - //#counter + // #counter object Counter { sealed trait Command case object Increment extends Command @@ -54,36 +54,36 @@ object ShardingCompileOnlySpec { } } - //#counter + // #counter - //#init + // #init val TypeKey = EntityTypeKey[Counter.Command]("Counter") val shardRegion: ActorRef[ShardingEnvelope[Counter.Command]] = sharding.init(Entity(TypeKey)(createBehavior = entityContext => Counter(entityContext.entityId))) - //#init + // #init - //#send + // #send // With an EntityRef val counterOne: EntityRef[Counter.Command] = sharding.entityRefFor(TypeKey, "counter-1") counterOne ! Counter.Increment // Entity id is specified via an `ShardingEnvelope` shardRegion ! 
ShardingEnvelope("counter-1", Counter.Increment) - //#send + // #send - //#persistence + // #persistence val BlogTypeKey = EntityTypeKey[Command]("BlogPost") ClusterSharding(system).init(Entity(BlogTypeKey) { entityContext => BlogPostEntity(entityContext.entityId, PersistenceId(entityContext.entityTypeKey.name, entityContext.entityId)) }) - //#persistence + // #persistence - //#roles + // #roles sharding.init( Entity(TypeKey)(createBehavior = entityContext => Counter(entityContext.entityId)).withRole("backend")) - //#roles + // #roles } @@ -91,7 +91,7 @@ object ShardingCompileOnlySpec { import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.EntityTypeKey - //#counter-passivate + // #counter-passivate object Counter { sealed trait Command case object Increment extends Command @@ -122,14 +122,14 @@ object ShardingCompileOnlySpec { } } } - //#counter-passivate + // #counter-passivate - //#counter-passivate-init + // #counter-passivate-init val TypeKey = EntityTypeKey[Counter.Command]("Counter") ClusterSharding(system).init(Entity(TypeKey)(createBehavior = entityContext => Counter(entityContext.shard, entityContext.entityId)).withStopMessage(Counter.GoodByeCounter)) - //#counter-passivate-init + // #counter-passivate-init } @@ -138,7 +138,7 @@ object ShardingCompileOnlySpec { import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.EntityTypeKey - //#sharded-response + // #sharded-response // a sharded actor that needs counter updates object CounterConsumer { sealed trait Command @@ -169,7 +169,7 @@ object ShardingCompileOnlySpec { } } - //#sharded-response + // #sharded-response } object ShardRegionStateQuery { @@ -180,14 +180,14 @@ object ShardingCompileOnlySpec { val replyMessageAdapter: ActorRef[akka.cluster.sharding.ShardRegion.CurrentShardRegionState] = ??? - //#get-shard-region-state + // #get-shard-region-state import akka.cluster.sharding.typed.GetShardRegionState import akka.cluster.sharding.ShardRegion.CurrentShardRegionState val replyTo: ActorRef[CurrentShardRegionState] = replyMessageAdapter ClusterSharding(system).shardState ! GetShardRegionState(Counter.TypeKey, replyTo) - //#get-shard-region-state + // #get-shard-region-state } object ClusterShardingStatsQuery { @@ -198,7 +198,7 @@ object ShardingCompileOnlySpec { val replyMessageAdapter: ActorRef[akka.cluster.sharding.ShardRegion.ClusterShardingStats] = ??? - //#get-cluster-sharding-stats + // #get-cluster-sharding-stats import akka.cluster.sharding.typed.GetClusterShardingStats import akka.cluster.sharding.ShardRegion.ClusterShardingStats import scala.concurrent.duration._ @@ -207,7 +207,7 @@ object ShardingCompileOnlySpec { val timeout: FiniteDuration = 5.seconds ClusterSharding(system).shardState ! 
GetClusterShardingStats(Counter.TypeKey, timeout, replyTo) - //#get-cluster-sharding-stats + // #get-cluster-sharding-stats } } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala index 39226ba7b8..33d7421db4 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala @@ -21,7 +21,7 @@ import akka.actor.typed.scaladsl.Behaviors @nowarn("msg=never used") object PointToPointDocExample { - //#producer + // #producer object FibonacciProducer { sealed trait Command @@ -51,9 +51,9 @@ object PointToPointDocExample { } } } - //#producer + // #producer - //#consumer + // #consumer import akka.actor.typed.delivery.ConsumerController object FibonacciConsumer { @@ -79,12 +79,12 @@ object PointToPointDocExample { } } } - //#consumer + // #consumer object Guardian { def apply(): Behavior[Nothing] = { Behaviors.setup[Nothing] { context => - //#connect + // #connect val consumerController = context.spawn(ConsumerController[FibonacciConsumer.Command](), "consumerController") context.spawn(FibonacciConsumer(consumerController), "consumer") @@ -95,7 +95,7 @@ object PointToPointDocExample { context.spawn(FibonacciProducer(producerController), "producer") consumerController ! ConsumerController.RegisterToProducerController(producerController) - //#connect + // #connect Behaviors.empty } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala index 5a86ab7f0b..77354eed0f 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala @@ -23,7 +23,7 @@ import akka.util.Timeout object ShardingDocExample { - //#consumer + // #consumer trait DB { def save(id: String, value: TodoList.State): Future[Done] def load(id: String): Future[TodoList.State] @@ -102,9 +102,9 @@ object ShardingDocExample { } } } - //#consumer + // #consumer - //#producer + // #producer import akka.cluster.sharding.typed.delivery.ShardingProducerController object TodoService { @@ -182,11 +182,11 @@ object ShardingDocExample { } } - //#producer + // #producer def illustrateInit(): Unit = { Behaviors.setup[Nothing] { context => - //#init + // #init import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.Entity import akka.cluster.sharding.typed.scaladsl.EntityTypeKey @@ -208,7 +208,7 @@ object ShardingDocExample { context.spawn(ShardingProducerController(producerId, region, durableQueueBehavior = None), "producerController") context.spawn(TodoService(producerController), "producer") - //#init + // #init Behaviors.empty } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala index f9ee80a853..0dd1af132b 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala @@ -17,12 +17,12 @@ import scala.annotation.nowarn @nowarn("msg=never used") object WorkPullingDocExample { - //#imports + // #imports import akka.actor.typed.scaladsl.Behaviors import 
akka.actor.typed.Behavior - //#imports + // #imports - //#consumer + // #consumer import akka.actor.typed.delivery.ConsumerController import akka.actor.typed.receptionist.ServiceKey @@ -59,9 +59,9 @@ object WorkPullingDocExample { } } - //#consumer + // #consumer - //#producer + // #producer import akka.actor.typed.delivery.WorkPullingProducerController import akka.actor.typed.scaladsl.ActorContext import akka.actor.typed.scaladsl.StashBuffer @@ -74,9 +74,9 @@ object WorkPullingDocExample { final case class GetResult(resultId: UUID, replyTo: ActorRef[Option[Array[Byte]]]) extends Command - //#producer + // #producer - //#ask + // #ask final case class ConvertRequest( fromFormat: String, toFormat: String, @@ -91,9 +91,9 @@ object WorkPullingDocExample { private final case class AskReply(resultId: UUID, originalReplyTo: ActorRef[ConvertResponse], timeout: Boolean) extends Command - //#ask + // #ask - //#producer + // #producer def apply(): Behavior[Command] = { Behaviors.setup { context => val requestNextAdapter = @@ -105,8 +105,8 @@ object WorkPullingDocExample { workerServiceKey = ImageConverter.serviceKey, durableQueueBehavior = None), "producerController") - //#producer - //#durable-queue + // #producer + // #durable-queue import akka.persistence.typed.delivery.EventSourcedProducerQueue import akka.persistence.typed.PersistenceId @@ -118,8 +118,8 @@ object WorkPullingDocExample { workerServiceKey = ImageConverter.serviceKey, durableQueueBehavior = Some(durableQueue)), "producerController") - //#durable-queue - //#producer + // #durable-queue + // #producer producerController ! WorkPullingProducerController.Start(requestNextAdapter) Behaviors.withStash(1000) { stashBuffer => @@ -168,9 +168,9 @@ object WorkPullingDocExample { throw new IllegalStateException("Unexpected RequestNext") } } - //#producer + // #producer object askScope { - //#ask + // #ask import WorkPullingProducerController.MessageWithConfirmation import akka.util.Timeout @@ -224,10 +224,10 @@ object WorkPullingDocExample { } } - //#ask + // #ask } - //#producer + // #producer } - //#producer + // #producer } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index 2738467790..eb4855483f 100755 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -152,7 +152,6 @@ import akka.util.ccompat.JavaConverters._ * then supposed to stop itself. Incoming messages will be buffered by the `ShardRegion` * between reception of `Passivate` and termination of the entity. Such buffered messages * are thereafter delivered to a new incarnation of the entity. 
- * */ object ClusterSharding extends ExtensionId[ClusterSharding] with ExtensionIdProvider { @@ -603,10 +602,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { dataCenter: Optional[String], messageExtractor: ShardRegion.MessageExtractor): ActorRef = { - startProxy(typeName, Option(role.orElse(null)), Option(dataCenter.orElse(null)), extractEntityId = { - case msg if messageExtractor.entityId(msg) ne null => - (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) - }, extractShardId = msg => messageExtractor.shardId(msg)) + startProxy(typeName, Option(role.orElse(null)), Option(dataCenter.orElse(null)), + extractEntityId = { + case msg if messageExtractor.entityId(msg) ne null => + (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) + }, extractShardId = msg => messageExtractor.shardId(msg)) } @@ -728,7 +728,7 @@ private[akka] class ClusterShardingGuardian extends Actor { private def replicator(settings: ClusterShardingSettings): ActorRef = { if (settings.stateStoreMode == ClusterShardingSettings.StateStoreModeDData || - settings.stateStoreMode == ClusterShardingSettings.RememberEntitiesStoreCustom) { + settings.stateStoreMode == ClusterShardingSettings.RememberEntitiesStoreCustom) { // one Replicator per role replicatorByRole.get(settings.role) match { case Some(ref) => ref @@ -747,13 +747,13 @@ private[akka] class ClusterShardingGuardian extends Actor { def receive: Receive = { case Start( - typeName, - entityProps, - settings, - extractEntityId, - extractShardId, - allocationStrategy, - handOffStopMessage) => + typeName, + entityProps, + settings, + extractEntityId, + extractShardId, + allocationStrategy, + handOffStopMessage) => try { import settings.role import settings.tuningParameters.coordinatorFailureBackoff diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala index 7435b9437b..a200bf11da 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala @@ -852,15 +852,15 @@ object ClusterShardingSettings { (if (dynamicAging) " (with dynamic aging)" else "") + idle.fold("")(idle => " and " + describe(idle)) case CompositePassivationStrategy( - limit, - mainStrategy, - windowStrategy, - initialWindowProportion, - minimumWindowProportion, - maximumWindowProportion, - windowOptimizer, - admissionFilter, - idle) => + limit, + mainStrategy, + windowStrategy, + initialWindowProportion, + minimumWindowProportion, + maximumWindowProportion, + windowOptimizer, + admissionFilter, + idle) => val describeWindow = windowStrategy match { case NoPassivationStrategy => "no admission window" case _ => @@ -869,10 +869,10 @@ object ClusterShardingSettings { case CompositePassivationStrategy.NoAdmissionOptimizer => s" with proportion [$initialWindowProportion]" case CompositePassivationStrategy.HillClimbingAdmissionOptimizer( - adjustMultiplier, - initialStep, - restartThreshold, - stepDecay) => + adjustMultiplier, + initialStep, + restartThreshold, + stepDecay) => s" with proportions [initial = $initialWindowProportion, min = $minimumWindowProportion, max = $maximumWindowProportion]" + " adapting with hill-climbing optimizer [" + s"adjust multiplier = $adjustMultiplier, " + @@ -884,10 +884,10 @@ object ClusterShardingSettings { val describeFilter = admissionFilter match { 
case CompositePassivationStrategy.AlwaysAdmissionFilter => "always admit" case CompositePassivationStrategy.FrequencySketchAdmissionFilter( - widthMultiplier, - resetMultiplier, - depth, - counterBits) => + widthMultiplier, + resetMultiplier, + depth, + counterBits) => "admit using frequency sketch [" + s"width multiplier = $widthMultiplier, " + s"reset multiplier = $resetMultiplier, " + diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala index 45db16af50..63599a6448 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala @@ -64,7 +64,7 @@ object RemoveInternalClusterShardingData { println("Specify the Cluster Sharding type names to remove in program arguments") else { val system = ActorSystem("RemoveInternalClusterShardingData") - val remove2dot3Data = (args(0) == "-2.3") + val remove2dot3Data = args(0) == "-2.3" val typeNames = if (remove2dot3Data) args.tail.toSet else args.toSet if (typeNames.isEmpty) println("Specify the Cluster Sharding type names to remove in program arguments") diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala index fd69abd401..8e38848e90 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala @@ -151,7 +151,7 @@ private[akka] object Shard { * +------------------------------------------------------------------------------------------+------------------------------------------------+<-------------+ * stop stored/passivation complete * }}} - **/ + */ sealed trait EntityState { def transition(newState: EntityState, entities: Entities): EntityState final def invalidTransition(to: EntityState, entities: Entities): EntityState = { @@ -465,12 +465,11 @@ private[akka] class Shard( context.system.scheduler.scheduleWithFixedDelay(interval, interval, self, PassivateIntervalTick) } - private val lease = settings.leaseSettings.map( - ls => - LeaseProvider(context.system).getLease( - s"${context.system.name}-shard-$typeName-$shardId", - ls.leaseImplementation, - Cluster(context.system).selfAddress.hostPort)) + private val lease = settings.leaseSettings.map(ls => + LeaseProvider(context.system).getLease( + s"${context.system.name}-shard-$typeName-$shardId", + ls.leaseImplementation, + Cluster(context.system).selfAddress.hostPort)) private val leaseRetryInterval = settings.leaseSettings match { case Some(l) => l.leaseRetryInterval @@ -882,7 +881,7 @@ private[akka] class Shard( HandOffStopper.props(typeName, shardId, replyTo, activeEntities, handOffStopMessage, entityHandOffTimeout), "HandOffStopper"))) - //During hand off we only care about watching for termination of the hand off stopper + // During hand off we only care about watching for termination of the hand off stopper context.become { case Terminated(ref) => receiveTerminated(ref) } @@ -1152,7 +1151,7 @@ private[akka] class Shard( // After entity started def sendMsgBuffer(entityId: EntityId): Unit = { - //Get the buffered messages and remove the buffer + // Get the buffered messages and remove the buffer val messages = messageBuffers.getOrEmpty(entityId) messageBuffers.remove(entityId) diff --git 
a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala index 6d44503cf6..91390a764e 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala @@ -477,7 +477,7 @@ object ShardCoordinator { case ShardRegionTerminated(region) => require(regions.contains(region), s"Terminated region $region not registered: $this") val newUnallocatedShards = - if (rememberEntities) (unallocatedShards ++ regions(region)) else unallocatedShards + if (rememberEntities) unallocatedShards ++ regions(region) else unallocatedShards copy(regions = regions - region, shards = shards -- regions(region), unallocatedShards = newUnallocatedShards) case ShardRegionProxyTerminated(proxy) => require(regionProxies.contains(proxy), s"Terminated region proxy $proxy not registered: $this") @@ -486,7 +486,7 @@ object ShardCoordinator { require(regions.contains(region), s"Region $region not registered: $this") require(!shards.contains(shard), s"Shard [$shard] already allocated: $this") val newUnallocatedShards = - if (rememberEntities) (unallocatedShards - shard) else unallocatedShards + if (rememberEntities) unallocatedShards - shard else unallocatedShards copy( shards = shards.updated(shard, region), regions = regions.updated(region, regions(region) :+ shard), @@ -496,7 +496,7 @@ object ShardCoordinator { val region = shards(shard) require(regions.contains(region), s"Region $region for shard [$shard] not registered: $this") val newUnallocatedShards = - if (rememberEntities) (unallocatedShards + shard) else unallocatedShards + if (rememberEntities) unallocatedShards + shard else unallocatedShards copy( shards = shards - shard, regions = regions.updated(region, regions(region).filterNot(_ == shard)), @@ -802,7 +802,7 @@ abstract class ShardCoordinator( case ResendShardHost(shard, region) => state.shards.get(shard) match { case Some(`region`) => sendHostShardMsg(shard, region) - case _ => //Reallocated to another region + case _ => // Reallocated to another region } case RebalanceTick => @@ -941,7 +941,7 @@ abstract class ShardCoordinator( private def terminate(): Unit = { if (aliveRegions.exists(_.path.address.hasLocalScope) || gracefulShutdownInProgress.exists( - _.path.address.hasLocalScope)) { + _.path.address.hasLocalScope)) { aliveRegions .find(_.path.address.hasLocalScope) .foreach(region => @@ -1195,7 +1195,7 @@ abstract class ShardCoordinator( case Some(ref) => getShardHomeSender ! ShardHome(shard, ref) case None => if (state.regions.contains(region) && !gracefulShutdownInProgress(region) && !regionTerminationInProgress - .contains(region)) { + .contains(region)) { update(ShardHomeAllocated(shard, region)) { evt => state = state.updated(evt) log.debug( @@ -1352,7 +1352,7 @@ class PersistentShardCoordinator( if (verboseDebug) log.debug("{}: receiveRecover SnapshotOffer {}", typeName, st) state = st.withRememberEntities(settings.rememberEntities) - //Old versions of the state object may not have unallocatedShard set, + // Old versions of the state object may not have unallocatedShard set, // thus it will be null. 
if (state.unallocatedShards == null) state = state.copy(unallocatedShards = Set.empty) @@ -1640,8 +1640,9 @@ private[akka] class DDataShardCoordinator( case UpdateTimeout(CoordinatorStateKey, Some(`evt`)) => updateStateRetries += 1 - val template = s"$typeName: The ShardCoordinator was unable to update a distributed state within 'updating-state-timeout': ${stateWriteConsistency.timeout.toMillis} millis (${if (terminating) "terminating" - else "retrying"}). Attempt $updateStateRetries. " + + val template = + s"$typeName: The ShardCoordinator was unable to update a distributed state within 'updating-state-timeout': ${stateWriteConsistency.timeout.toMillis} millis (${if (terminating) "terminating" + else "retrying"}). Attempt $updateStateRetries. " + s"Perhaps the ShardRegion has not started on all active nodes yet? event=$evt" if (updateStateRetries < 5) { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index 207d3e4947..77c8e2459e 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -304,7 +304,6 @@ object ShardRegion { def getRegionStatsInstance = GetShardRegionStats /** - * * @param stats the region stats mapping of `ShardId` to number of entities * @param failed set of shards if any failed to respond within the timeout */ @@ -793,8 +792,7 @@ private[akka] class ShardRegion( preparingForShutdown = true case _: MemberEvent => // these are expected, no need to warn about them - - case _ => unhandled(evt) + case _ => unhandled(evt) } private def addMember(m: Member): Unit = { @@ -819,7 +817,7 @@ private[akka] class ShardRegion( regionByShard = regionByShard.updated(shard, self) regions = regions.updated(self, regions.getOrElse(self, Set.empty) + shard) - //Start the shard, if already started this does nothing + // Start the shard, if already started this does nothing getShard(shard) sender() ! ShardStarted(shard) } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala index 0fbc6fc0ae..4c154c1b85 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala @@ -31,7 +31,8 @@ private[sharding] object ShardingQueries { total: Int, timeout: FiniteDuration) { - /** The number of shards queried, which could equal the `total` or, + /** + * The number of shards queried, which could equal the `total` or, * be a subset if this was a retry of those that failed. 
*/ val queried: Int = failed.size + responses.size diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala index b585f2de79..9039343457 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala @@ -180,28 +180,29 @@ class ExternalShardAllocationStrategy(systemProvider: ClassicActorSystemProvider log.debug("Current allocations by address: [{}]", currentAllocationByAddress) - val shardsThatNeedRebalanced: Future[Set[ShardId]] = for { - desiredMappings <- (shardState ? GetShardLocations).mapTo[GetShardLocationsResponse] - } yield { - log.debug("desired allocations: [{}]", desiredMappings.desiredAllocations) - desiredMappings.desiredAllocations.filter { - case (shardId, expectedLocation) if currentlyAllocatedShards.contains(shardId) => - currentAllocationByAddress.get(expectedLocation) match { - case None => - log.debug( - "Shard [{}] desired location [{}] is not part of the cluster, not rebalancing", - shardId, - expectedLocation) - false // not a current allocation so don't rebalance yet - case Some(shards) => - val inCorrectLocation = shards.contains(shardId) - !inCorrectLocation - } - case (shardId, _) => - log.debug("Shard [{}] not currently allocated so not rebalancing to desired location", shardId) - false - } - }.keys.toSet + val shardsThatNeedRebalanced: Future[Set[ShardId]] = + for { + desiredMappings <- (shardState ? GetShardLocations).mapTo[GetShardLocationsResponse] + } yield { + log.debug("desired allocations: [{}]", desiredMappings.desiredAllocations) + desiredMappings.desiredAllocations.filter { + case (shardId, expectedLocation) if currentlyAllocatedShards.contains(shardId) => + currentAllocationByAddress.get(expectedLocation) match { + case None => + log.debug( + "Shard [{}] desired location [{}] is not part of the cluster, not rebalancing", + shardId, + expectedLocation) + false // not a current allocation so don't rebalance yet + case Some(shards) => + val inCorrectLocation = shards.contains(shardId) + !inCorrectLocation + } + case (shardId, _) => + log.debug("Shard [{}] not currently allocated so not rebalancing to desired location", shardId) + false + } + }.keys.toSet shardsThatNeedRebalanced .map { done => diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala index fedbcb454b..98ce81cc7b 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala @@ -86,7 +86,7 @@ final private[external] class ExternalShardAllocationClientImpl(system: ActorSys case NotFound(_, _) => Future.successful(Map.empty[ShardId, ShardLocation]) case GetFailure(_, _) => - Future.failed((new ClientTimeoutException(s"Unable to get shard locations after ${timeout.duration.pretty}"))) + Future.failed(new ClientTimeoutException(s"Unable to get shard locations after ${timeout.duration.pretty}")) case _ => throw new IllegalArgumentException() // compiler exhaustiveness check pleaser } 
.map { locations => diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala index 447bc807ee..2bdcc122e2 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala @@ -58,8 +58,6 @@ private[akka] object AbstractLeastShardAllocationStrategy { } /** - - * * INTERNAL API */ @InternalApi diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala index 16f804b7b7..3e290641fa 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala @@ -178,17 +178,18 @@ private[akka] final class DDataRememberEntitiesShardStore( } private def onUpdate(update: RememberEntitiesShardStore.Update): Unit = { - val allEvts: Set[Evt] = (update.started.map(Started(_): Evt).union(update.stopped.map(Stopped(_)))) + val allEvts: Set[Evt] = update.started.map(Started(_): Evt).union(update.stopped.map(Stopped(_))) // map from set of evts (for same ddata key) to one update that applies each of them val ddataUpdates: Map[Set[Evt], (Update[ORSet[EntityId]], Int)] = allEvts.groupBy(evt => key(evt.id)).map { case (key, evts) => - (evts, (Update(key, ORSet.empty[EntityId], writeMajority, Some(evts)) { existing => - evts.foldLeft(existing) { - case (acc, Started(id)) => acc :+ id - case (acc, Stopped(id)) => acc.remove(id) - } - }, maxUpdateAttempts)) + (evts, + (Update(key, ORSet.empty[EntityId], writeMajority, Some(evts)) { existing => + evts.foldLeft(existing) { + case (acc, Started(id)) => acc :+ id + case (acc, Stopped(id)) => acc.remove(id) + } + }, maxUpdateAttempts)) } ddataUpdates.foreach { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala index fa9ed1dcc0..bd81024bea 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala @@ -744,10 +744,10 @@ private[akka] object AdmissionOptimizer { optimizer: ClusterShardingSettings.CompositePassivationStrategy.AdmissionOptimizer): AdmissionOptimizer = optimizer match { case ClusterShardingSettings.CompositePassivationStrategy.HillClimbingAdmissionOptimizer( - adjustMultiplier, - initialStep, - restartThreshold, - stepDecay) => + adjustMultiplier, + initialStep, + restartThreshold, + stepDecay) => new HillClimbingAdmissionOptimizer(initialLimit, adjustMultiplier, initialStep, restartThreshold, stepDecay) case _ => NoAdmissionOptimizer } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala index 225e85caa6..33dd97588b 100644 --- 
a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala @@ -404,11 +404,11 @@ class RandomizedSplitBrainResolverIntegrationSpec "SplitBrainResolver with lease" must { for (scenario <- scenarios) { - scenario.toString taggedAs (LongRunningTest) in { + scenario.toString taggedAs LongRunningTest in { // temporarily disabled for aeron-udp in multi-node: https://github.com/akka/akka/pull/30706/ val arteryConfig = system.settings.config.getConfig("akka.remote.artery") if (arteryConfig.getInt("canonical.port") == 6000 && - arteryConfig.getString("transport") == "aeron-udp") { + arteryConfig.getString("transport") == "aeron-udp") { pending } DisposableSys(scenario).verify() diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala index a11df3908f..a700fc3f2f 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala @@ -447,10 +447,11 @@ class SplitBrainResolverIntegrationSpec Scenario(keepOldestConfig, 3, 3, KeepSide1), Scenario(keepOldestConfig, 1, 1, KeepSide1), Scenario(keepOldestConfig, 1, 2, KeepSide2), // because down-if-alone - Scenario(keepMajorityConfig, 3, 2, KeepAll, { - case `node1` | `node2` | `node3` => "dcA" - case _ => "dcB" - }), + Scenario(keepMajorityConfig, 3, 2, KeepAll, + { + case `node1` | `node2` | `node3` => "dcA" + case _ => "dcB" + }), Scenario(downAllConfig, 1, 2, ShutdownBoth), Scenario(leaseMajorityConfig, 4, 5, KeepSide2)) @@ -461,7 +462,7 @@ class SplitBrainResolverIntegrationSpec // temporarily disabled for aeron-udp in multi-node: https://github.com/akka/akka/pull/30706/ val arteryConfig = system.settings.config.getConfig("akka.remote.artery") if (arteryConfig.getInt("canonical.port") == 6000 && - arteryConfig.getString("transport") == "aeron-udp") { + arteryConfig.getString("transport") == "aeron-udp") { pending } DisposableSys(scenario).verify() diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala index 311b2f5da0..969fdd6a40 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala @@ -167,7 +167,7 @@ abstract class ClusterShardCoordinatorDowning2Spec(multiNodeConfig: ClusterShard region.tell(Ping(id), probe.ref) if (ref.path.address == secondAddress) { val newRef = probe.expectMsgType[ActorRef](1.second) - newRef should not be (ref) + newRef should not be ref system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) } else probe.expectMsg(1.second, ref) // should not move diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala index 9465fd20b2..31996e3b29 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala +++ 
b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala @@ -169,7 +169,7 @@ abstract class ClusterShardCoordinatorDowningSpec(multiNodeConfig: ClusterShardC region.tell(Ping(id), probe.ref) if (ref.path.address == firstAddress) { val newRef = probe.expectMsgType[ActorRef](1.second) - newRef should not be (ref) + newRef should not be ref system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) } else probe.expectMsg(1.second, ref) // should not move diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala index 0f2ee0ba89..9db8562e3b 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala @@ -175,13 +175,13 @@ abstract class ClusterShardingFailureSpec(multiNodeConfig: ClusterShardingFailur val entity21 = lastSender val shard2 = system.actorSelection(entity21.path.parent) - //Test the ShardCoordinator allocating shards after a journal/network failure + // Test the ShardCoordinator allocating shards after a journal/network failure region ! Add("30", 3) - //Test the Shard starting entities and persisting after a journal/network failure + // Test the Shard starting entities and persisting after a journal/network failure region ! Add("11", 1) - //Test the Shard passivate works after a journal failure + // Test the Shard passivate works after a journal failure shard2.tell(Passivate(PoisonPill), entity21) awaitAssert { diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala index 8860d46ee8..58ccc732ac 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala @@ -7,7 +7,8 @@ package akka.cluster.sharding import akka.testkit._ object ClusterShardingIncorrectSetupSpecConfig - extends MultiNodeClusterShardingConfig(additionalConfig = "akka.cluster.sharding.waiting-for-state-timeout = 100ms") { + extends MultiNodeClusterShardingConfig( + additionalConfig = "akka.cluster.sharding.waiting-for-state-timeout = 100ms") { val first = role("first") val second = role("second") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala index 97f209a4df..2f22ed7e3c 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala @@ -165,7 +165,7 @@ abstract class ClusterShardingLeavingSpec(multiNodeConfig: ClusterShardingLeavin region.tell(Ping(id), probe.ref) if (leavingNodes.contains(ref.path.address)) { val newRef = probe.expectMsgType[ActorRef](1.second) - newRef should not be (ref) + newRef should not be ref system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) } else probe.expectMsg(1.second, ref) // should not move diff --git 
a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala index bf47720c0b..e9c3fe89a2 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala @@ -112,16 +112,16 @@ abstract class ClusterShardingQueriesSpec val probe = TestProbe() val region = ClusterSharding(system).shardRegion(shardTypeName) awaitAssert({ - region.tell(ShardRegion.GetClusterShardingStats(10.seconds), probe.ref) - val regions = probe.expectMsgType[ShardRegion.ClusterShardingStats].regions - regions.size shouldEqual 3 - val timeouts = numberOfShards / regions.size + region.tell(ShardRegion.GetClusterShardingStats(10.seconds), probe.ref) + val regions = probe.expectMsgType[ShardRegion.ClusterShardingStats].regions + regions.size shouldEqual 3 + val timeouts = numberOfShards / regions.size - // 3 regions, 2 shards per region, all 2 shards/region were unresponsive - // within shard-region-query-timeout, which only on first is 0ms - regions.values.map(_.stats.size).sum shouldEqual 4 - regions.values.map(_.failed.size).sum shouldEqual timeouts - }, max = 10.seconds) + // 3 regions, 2 shards per region, all 2 shards/region were unresponsive + // within shard-region-query-timeout, which only on first is 0ms + regions.values.map(_.stats.size).sum shouldEqual 4 + regions.values.map(_.failed.size).sum shouldEqual timeouts + }, max = 10.seconds) } enterBarrier("received failed stats from timed out shards vs empty") } @@ -131,11 +131,11 @@ abstract class ClusterShardingQueriesSpec val probe = TestProbe() val region = ClusterSharding(system).shardRegion(shardTypeName) awaitAssert({ - region.tell(ShardRegion.GetShardRegionState, probe.ref) - val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards.isEmpty shouldEqual true - state.failed.size shouldEqual 2 - }, max = 10.seconds) + region.tell(ShardRegion.GetShardRegionState, probe.ref) + val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] + state.shards.isEmpty shouldEqual true + state.failed.size shouldEqual 2 + }, max = 10.seconds) } enterBarrier("query-timeout-on-busy-node") @@ -143,11 +143,11 @@ abstract class ClusterShardingQueriesSpec val probe = TestProbe() val region = ClusterSharding(system).shardRegion(shardTypeName) awaitAssert({ - region.tell(ShardRegion.GetShardRegionState, probe.ref) - val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards.size shouldEqual 2 - state.failed.isEmpty shouldEqual true - }, max = 10.seconds) + region.tell(ShardRegion.GetShardRegionState, probe.ref) + val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] + state.shards.size shouldEqual 2 + state.failed.isEmpty shouldEqual true + }, max = 10.seconds) } enterBarrier("done") } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala index b874ff1547..40ca3194fe 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala @@ -182,7 +182,8 @@ 
abstract class ClusterShardingRememberEntitiesNewExtractorSpec( enterBarrier("first-sharding-cluster-stopped") } - "start new nodes with different extractor, and have the entities running on the right shards" in within(30.seconds) { + "start new nodes with different extractor, and have the entities running on the right shards" in within( + 30.seconds) { // start it with a new shard id extractor, which will put the entities // on different shards diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala index 0d633936de..dc77f7d8c2 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala @@ -25,7 +25,7 @@ import akka.testkit.TestEvent.Mute import scala.annotation.nowarn object ClusterShardingSpec { - //#counter-actor + // #counter-actor case object Increment case object Decrement final case class Get(counterId: Long) @@ -43,14 +43,14 @@ object ClusterShardingSpec { override def persistenceId: String = "Counter-" + self.path.name var count = 0 - //#counter-actor + // #counter-actor override def postStop(): Unit = { super.postStop() // Simulate that the passivation takes some time, to verify passivation buffering Thread.sleep(500) } - //#counter-actor + // #counter-actor def updateState(event: CounterChanged): Unit = count += event.delta @@ -67,7 +67,7 @@ object ClusterShardingSpec { case Stop => context.stop(self) } } - //#counter-actor + // #counter-actor val extractEntityId: ShardRegion.ExtractEntityId = { case EntityEnvelope(id, payload) => (id.toString, payload) @@ -92,7 +92,7 @@ object ClusterShardingSpec { class AnotherCounter extends QualifiedCounter("AnotherCounter") - //#supervisor + // #supervisor class CounterSupervisor extends Actor { val counter = context.actorOf(Props[Counter](), "theCounter") @@ -107,7 +107,7 @@ object ClusterShardingSpec { case msg => counter.forward(msg) } } - //#supervisor + // #supervisor } @@ -125,7 +125,8 @@ abstract class ClusterShardingSpecConfig( val fifth = role("fifth") val sixth = role("sixth") - /** This is the only test that creates the shared store regardless of mode, + /** + * This is the only test that creates the shared store regardless of mode, * because it uses a PersistentActor. 
So unlike all other uses of * `MultiNodeClusterShardingConfig`, we use `MultiNodeConfig.commonConfig` here, * and call `MultiNodeClusterShardingConfig.persistenceConfig` which does not check @@ -182,7 +183,7 @@ abstract class ClusterShardingSpecConfig( object ClusterShardingDocCode { import ClusterShardingSpec._ - //#counter-extractor + // #counter-extractor val extractEntityId: ShardRegion.ExtractEntityId = { case EntityEnvelope(id, payload) => (id.toString, payload) case msg @ Get(id) => (id.toString, msg) @@ -198,10 +199,10 @@ object ClusterShardingDocCode { (id.toLong % numberOfShards).toString case _ => throw new IllegalArgumentException() } - //#counter-extractor + // #counter-extractor { - //#extractShardId-StartEntity + // #extractShardId-StartEntity val extractShardId: ShardRegion.ExtractShardId = { case EntityEnvelope(id, _) => (id % numberOfShards).toString case Get(id) => (id % numberOfShards).toString @@ -210,7 +211,7 @@ object ClusterShardingDocCode { (id.toLong % numberOfShards).toString case _ => throw new IllegalArgumentException() } - //#extractShardId-StartEntity + // #extractShardId-StartEntity extractShardId.toString() // keep the compiler happy } @@ -661,7 +662,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) if (probe.lastSender.path == rebalancingRegion.path / (n % 12).toString / n.toString) count += 1 } - count should be >= (2) + count should be >= 2 } } } @@ -672,14 +673,14 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) "easy to use with extensions" in within(50.seconds) { runOn(third, fourth, fifth, sixth) { - //#counter-start + // #counter-start val counterRegion: ActorRef = ClusterSharding(system).start( typeName = "Counter", entityProps = Props[Counter](), settings = ClusterShardingSettings(system), extractEntityId = extractEntityId, extractShardId = extractShardId) - //#counter-start + // #counter-start counterRegion.toString // keep the compiler happy ClusterSharding(system).start( @@ -689,18 +690,18 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) extractEntityId = extractEntityId, extractShardId = extractShardId) - //#counter-supervisor-start + // #counter-supervisor-start ClusterSharding(system).start( typeName = "SupervisedCounter", entityProps = Props[CounterSupervisor](), settings = ClusterShardingSettings(system), extractEntityId = extractEntityId, extractShardId = extractShardId) - //#counter-supervisor-start + // #counter-supervisor-start } enterBarrier("extension-started") runOn(fifth) { - //#counter-usage + // #counter-usage val counterRegion: ActorRef = ClusterSharding(system).shardRegion("Counter") counterRegion ! Get(123) expectMsg(0) @@ -708,7 +709,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) counterRegion ! EntityEnvelope(123, Increment) counterRegion ! Get(123) expectMsg(1) - //#counter-usage + // #counter-usage ClusterSharding(system).shardRegion("AnotherCounter") ! EntityEnvelope(123, Decrement) ClusterSharding(system).shardRegion("AnotherCounter") ! Get(123) @@ -775,7 +776,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) var shard: ActorSelection = null var region: ActorSelection = null runOn(third) { - //Create an increment counter 1 + // Create an increment counter 1 persistentEntitiesRegion ! EntityEnvelope(1, Increment) persistentEntitiesRegion ! 
     Get(1)
     expectMsg(1)
@@ -793,30 +794,30 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
       enterBarrier("everybody-hand-off-ack")

       runOn(third) {
-        //Stop the shard cleanly
+        // Stop the shard cleanly
         region ! HandOff("1")
         expectMsg(10 seconds, "ShardStopped not received", ShardStopped("1"))
         val probe = TestProbe()
         awaitAssert({
-          shard.tell(Identify(1), probe.ref)
-          probe.expectMsg(1 second, "Shard was still around", ActorIdentity(1, None))
-        }, 5 seconds, 500 millis)
+            shard.tell(Identify(1), probe.ref)
+            probe.expectMsg(1 second, "Shard was still around", ActorIdentity(1, None))
+          }, 5 seconds, 500 millis)

-        //Get the path to where the shard now resides
+        // Get the path to where the shard now resides
         awaitAssert({
-          persistentEntitiesRegion ! Get(13)
-          expectMsg(0)
-        }, 5 seconds, 500 millis)
+            persistentEntitiesRegion ! Get(13)
+            expectMsg(0)
+          }, 5 seconds, 500 millis)

-        //Check that counter 1 is now alive again, even though we have
+        // Check that counter 1 is now alive again, even though we have
         // not sent a message to it via the ShardRegion
         val counter1 = system.actorSelection(lastSender.path.parent / "1")
         within(5.seconds) {
           awaitAssert {
             val p = TestProbe()
             counter1.tell(Identify(2), p.ref)
-            p.expectMsgType[ActorIdentity](2.seconds).ref should not be (None)
+            p.expectMsgType[ActorIdentity](2.seconds).ref should not be None
           }
         }
@@ -826,14 +827,14 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
       enterBarrier("after-shard-restart")

       runOn(fourth) {
-        //Check a second region does not share the same persistent shards
+        // Check a second region does not share the same persistent shards

-        //Create a separate 13 counter
+        // Create a separate 13 counter
         anotherPersistentRegion ! EntityEnvelope(13, Increment)
         anotherPersistentRegion ! Get(13)
         expectMsg(1)

-        //Check that no counter "1" exists in this shard
+        // Check that no counter "1" exists in this shard
         val secondCounter1 = system.actorSelection(lastSender.path.parent / "1")
         secondCounter1 ! Identify(3)
         expectMsg(3 seconds, ActorIdentity(3, None))
@@ -849,7 +850,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
       enterBarrier("cluster-started-12")

       runOn(third) {
-        //Create and increment counter 1
+        // Create and increment counter 1
         persistentRegion ! EntityEnvelope(1, Increment)
         persistentRegion ! Get(1)
         expectMsg(1)
@@ -858,7 +859,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
         val shard = system.actorSelection(counter1.path.parent)
         val region = system.actorSelection(counter1.path.parent.parent)

-        //Create and increment counter 13
+        // Create and increment counter 13
         persistentRegion ! EntityEnvelope(13, Increment)
         persistentRegion ! Get(13)
         expectMsg(1)
@@ -867,50 +868,50 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
         counter1.path.parent should ===(counter13.path.parent)

-        //Send the shard the passivate message from the counter
+        // Send the shard the passivate message from the counter
         watch(counter1)
         shard.tell(Passivate(Stop), counter1)

-        //Watch for the terminated message
+        // Watch for the terminated message
         expectTerminated(counter1, 5 seconds)

         val probe1 = TestProbe()
         awaitAssert({
-          //Check counter 1 is dead
-          counter1.tell(Identify(1), probe1.ref)
-          probe1.expectMsg(1 second, "Entity 1 was still around", ActorIdentity(1, None))
-        }, 5 second, 500 millis)
+            // Check counter 1 is dead
+            counter1.tell(Identify(1), probe1.ref)
+            probe1.expectMsg(1 second, "Entity 1 was still around", ActorIdentity(1, None))
+          }, 5 second, 500 millis)

-        //Stop the shard cleanly
+        // Stop the shard cleanly
         region ! HandOff("1")
         expectMsg(10 seconds, "ShardStopped not received", ShardStopped("1"))

         val probe2 = TestProbe()
         awaitAssert({
-          shard.tell(Identify(2), probe2.ref)
-          probe2.expectMsg(1 second, "Shard was still around", ActorIdentity(2, None))
-        }, 5 seconds, 500 millis)
+            shard.tell(Identify(2), probe2.ref)
+            probe2.expectMsg(1 second, "Shard was still around", ActorIdentity(2, None))
+          }, 5 seconds, 500 millis)
       }

       enterBarrier("shard-shutdown-12")

       runOn(fourth) {
-        //Force the shard back up
+        // Force the shard back up
         persistentRegion ! Get(25)
         expectMsg(0)

         val shard = lastSender.path.parent

-        //Check counter 1 is still dead
+        // Check counter 1 is still dead
         system.actorSelection(shard / "1") ! Identify(3)
         expectMsg(ActorIdentity(3, None))

-        //Check counter 13 is alive again
+        // Check counter 13 is alive again
         val probe3 = TestProbe()
         awaitAssert({
-          system.actorSelection(shard / "13").tell(Identify(4), probe3.ref)
-          probe3.expectMsgType[ActorIdentity](1 second).ref should not be (None)
-        }, 5 seconds, 500 millis)
+            system.actorSelection(shard / "13").tell(Identify(4), probe3.ref)
+            probe3.expectMsgType[ActorIdentity](1 second).ref should not be None
+          }, 5 seconds, 500 millis)
       }

       enterBarrier("after-13")
@@ -923,7 +924,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
       enterBarrier("cluster-started-12")

       runOn(third) {
-        //Create and increment counter 1
+        // Create and increment counter 1
         persistentRegion ! EntityEnvelope(1, Increment)
         persistentRegion ! Get(1)
         expectMsg(2)
@@ -934,9 +935,9 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)

         val probe = TestProbe()
         awaitAssert({
-          counter1.tell(Identify(1), probe.ref)
-          probe.expectMsgType[ActorIdentity](1 second).ref should not be (None)
-        }, 5.seconds, 500.millis)
+            counter1.tell(Identify(1), probe.ref)
+            probe.expectMsgType[ActorIdentity](1 second).ref should not be None
+          }, 5.seconds, 500.millis)
       }

       enterBarrier("after-14")
     }

     "be migrated to new regions upon region failure" in within(15.seconds) {
-      //Start only one region, and force an entity onto that region
+      // Start only one region, and force an entity onto that region
       runOn(third) {
         autoMigrateRegion ! EntityEnvelope(1, Increment)
         autoMigrateRegion ! Get(1)
@@ -952,7 +953,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
       }
       enterBarrier("shard1-region3")

-      //Start another region and test it talks to node 3
+      // Start another region and test it talks to node 3
       runOn(fourth) {
         autoMigrateRegion ! EntityEnvelope(1, Increment)
@@ -960,20 +961,20 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
         expectMsg(2)
         lastSender.path should ===(node(third) / "user" / "AutoMigrateRememberRegionTestRegion" / "1" / "1")

-        //Kill region 3
+        // Kill region 3
         system.actorSelection(lastSender.path.parent.parent) ! PoisonPill
       }
       enterBarrier("region4-up")

       // Wait for migration to happen
-      //Test the shard, thus counter was moved onto node 4 and started.
+      // Test the shard, i.e. that the counter was moved onto node 4 and started.
       runOn(fourth) {
         val counter1 = system.actorSelection(system / "AutoMigrateRememberRegionTestRegion" / "1" / "1")
         val probe = TestProbe()
         awaitAssert({
-          counter1.tell(Identify(1), probe.ref)
-          probe.expectMsgType[ActorIdentity](1 second).ref should not be (None)
-        }, 5.seconds, 500 millis)
+            counter1.tell(Identify(1), probe.ref)
+            probe.expectMsgType[ActorIdentity](1 second).ref should not be None
+          }, 5.seconds, 500 millis)

         counter1 ! Get(1)
         expectMsg(2)
@@ -1008,11 +1009,11 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig)
           entity ! Identify(n)
           receiveOne(3 seconds) match {
             case ActorIdentity(id, Some(_)) if id == n => count = count + 1
-            case ActorIdentity(_, None)                => //Not on the fifth shard
+            case ActorIdentity(_, None)                => // Not on the fifth shard
             case _                                     => fail()
           }
         }
-        count should be >= (2)
+        count should be >= 2
       }
     }
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala
index 57249fa193..cd3165e865 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala
@@ -124,9 +124,9 @@ abstract class ExternalShardAllocationSpec
       runOn(second, third) {
         val probe = TestProbe()
         awaitAssert({
-          shardRegion.tell(Get(shardToSpecifyLocation), probe.ref)
-          probe.expectMsg(Home(address(first)))
-        }, 10.seconds)
+            shardRegion.tell(Get(shardToSpecifyLocation), probe.ref)
+            probe.expectMsg(Home(address(first)))
+          }, 10.seconds)
       }
       enterBarrier("shard-allocated-to-specific-node")
     }
@@ -145,9 +145,9 @@ abstract class ExternalShardAllocationSpec
       enterBarrier("forth-node-joined")
       runOn(first, second, third) {
         awaitAssert({
-          shardRegion ! Get(initiallyOnForth)
-          expectMsg(Home(address(forth)))
-        }, 10.seconds)
+            shardRegion ! Get(initiallyOnForth)
+            expectMsg(Home(address(forth)))
+          }, 10.seconds)
       }
       enterBarrier("shard-allocated-to-forth")
     }
@@ -160,9 +160,9 @@ abstract class ExternalShardAllocationSpec
       enterBarrier("shard-moved-from-forth-to-first")
       runOn(first, second, third, forth) {
         awaitAssert({
-          shardRegion ! Get(initiallyOnForth)
-          expectMsg(Home(address(first)))
-        }, 10.seconds)
+            shardRegion ! Get(initiallyOnForth)
+            expectMsg(Home(address(first)))
+          }, 10.seconds)
       }
       enterBarrier("finished")
     }
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala
index 885020e3cd..7139856c87 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala
@@ -47,7 +47,7 @@ object MultiDcClusterShardingSpec {

   object MultiDcClusterShardingSpecConfig
       extends MultiNodeClusterShardingConfig(
-        loglevel = "DEBUG", //issue #23741
+        loglevel = "DEBUG", // issue #23741
         additionalConfig = s"""
       akka.cluster {
         debug.verbose-heartbeat-logging = on
@@ -114,10 +114,10 @@ abstract class MultiDcClusterShardingSpec

   private def assertCurrentRegions(expected: Set[Address]): Unit = {
     awaitAssert({
-      val p = TestProbe()
-      region.tell(GetCurrentRegions, p.ref)
-      p.expectMsg(CurrentRegions(expected))
-    }, 10.seconds)
+        val p = TestProbe()
+        region.tell(GetCurrentRegions, p.ref)
+        p.expectMsg(CurrentRegions(expected))
+      }, 10.seconds)
   }

   "Cluster sharding in multi data center cluster" must {
@@ -128,11 +128,11 @@ abstract class MultiDcClusterShardingSpec
       join(fourth, first)

       awaitAssert({
-        withClue(s"Members: ${Cluster(system).state}") {
-          Cluster(system).state.members.size should ===(4)
-          Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
-        }
-      }, 10.seconds)
+          withClue(s"Members: ${Cluster(system).state}") {
+            Cluster(system).state.members.size should ===(4)
+            Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
+          }
+        }, 10.seconds)

       runOn(first, second) {
         assertCurrentRegions(Set(first, second).map(r => node(r).address))
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingConfig.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingConfig.scala
index bb46f31ecf..7540013a3d 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingConfig.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingConfig.scala
@@ -94,7 +94,8 @@ abstract class MultiNodeClusterShardingConfig(
   import MultiNodeClusterShardingConfig._

   val targetDir =
-    s"target/ClusterSharding${testNameFromCallStack(classOf[MultiNodeClusterShardingConfig]).replace("Config", "").replace("_", "")}"
+    s"target/ClusterSharding${testNameFromCallStack(classOf[MultiNodeClusterShardingConfig]).replace("Config",
+      "").replace("_", "")}"

   val persistenceConfig: Config =
     if (mode == ClusterShardingSettings.StateStoreModeDData && rememberEntitiesStore != ClusterShardingSettings.RememberEntitiesStoreEventsourced)
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala
index 0e19c8e9b3..373506dbe4 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala
@@ -173,7 +173,8 @@ abstract class MultiNodeClusterShardingSpec(val config: MultiNodeClusterSharding
   protected def persistenceIsNeeded: Boolean =
     mode == ClusterShardingSettings.StateStoreModePersistence ||
     system.settings.config
-      .getString("akka.cluster.sharding.remember-entities-store") == ClusterShardingSettings.RememberEntitiesStoreEventsourced
+      .getString(
+        "akka.cluster.sharding.remember-entities-store") == ClusterShardingSettings.RememberEntitiesStoreEventsourced

   protected def setStoreIfNeeded(sys: ActorSystem, storeOn: RoleName): Unit =
     if (persistenceIsNeeded) setStore(sys, storeOn)
diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala
index dd89fe7a20..062afb80e8 100644
--- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala
+++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala
@@ -111,7 +111,7 @@ abstract class RollingUpdateShardAllocationSpec
       // so the following allocations end up as one on each node
       awaitAssert {
         shardRegion ! ShardRegion.GetCurrentRegions
-        expectMsgType[ShardRegion.CurrentRegions].regions should have size (2)
+        expectMsgType[ShardRegion.CurrentRegions].regions should have size 2
       }

       shardRegion ! GiveMeYourHome.Get("id1")
@@ -123,7 +123,7 @@ abstract class RollingUpdateShardAllocationSpec
       val address2 = expectMsgType[GiveMeYourHome.Home].address

       // one on each node
-      Set(address1, address2) should have size (2)
+      Set(address1, address2) should have size 2
     }
     enterBarrier("first-version-started")
   }
@@ -140,7 +140,7 @@ abstract class RollingUpdateShardAllocationSpec
       // if we didn't the strategy will default it back to the old nodes
       awaitAssert {
         shardRegion ! ShardRegion.GetCurrentRegions
-        expectMsgType[ShardRegion.CurrentRegions].regions should have size (3)
+        expectMsgType[ShardRegion.CurrentRegions].regions should have size 3
       }
     }
     enterBarrier("third-region-registered")
@@ -173,9 +173,9 @@ abstract class RollingUpdateShardAllocationSpec

     runOn(second, third, fourth) {
       awaitAssert({
-        shardRegion ! ShardRegion.GetCurrentRegions
-        expectMsgType[ShardRegion.CurrentRegions].regions should have size (3)
-      }, 30.seconds)
+          shardRegion ! ShardRegion.GetCurrentRegions
+          expectMsgType[ShardRegion.CurrentRegions].regions should have size 3
+        }, 30.seconds)
     }
     enterBarrier("sharding-handed-off")
@@ -198,9 +198,9 @@ abstract class RollingUpdateShardAllocationSpec
     runOn(third, fourth) {
       // make sure coordinator has noticed there are only two regions
       awaitAssert({
-        shardRegion ! ShardRegion.GetCurrentRegions
-        expectMsgType[ShardRegion.CurrentRegions].regions should have size (2)
-      }, 30.seconds)
+          shardRegion ! ShardRegion.GetCurrentRegions
+          expectMsgType[ShardRegion.CurrentRegions].regions should have size 2
+        }, 30.seconds)
     }
     enterBarrier("second-left")
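The `RollingUpdateShardAllocationSpec` hunks above repeatedly poll the region with `ShardRegion.GetCurrentRegions` until the coordinator has seen the expected membership. A hedged helper capturing that polling loop in isolation; the object and method names and the 30 second bound are assumptions for the sketch:

```scala
import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.cluster.sharding.ShardRegion
import akka.testkit.TestProbe

object RegionCountProbe {
  // Sketch: ask the local shard region which regions the coordinator currently
  // knows about, retrying until the expected number has registered.
  def awaitRegionCount(shardRegion: ActorRef, expected: Int, probe: TestProbe): Unit =
    probe.awaitAssert({
        shardRegion.tell(ShardRegion.GetCurrentRegions, probe.ref)
        val regions = probe.expectMsgType[ShardRegion.CurrentRegions].regions
        assert(regions.size == expected, s"expected $expected regions, saw ${regions.size}")
      }, max = 30.seconds)
}
```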
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala
index c64e44fbf1..b61d805f15 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala
@@ -136,9 +136,9 @@ class ClusterShardingLeaseSpec(config: Config, rememberEntities: Boolean)
       expectMsg(4)
       testLease.getCurrentCallback()(Option(LeaseFailed("oh dear")))
       awaitAssert({
-        region ! 4
-        expectMsg(4)
-      }, max = 10.seconds)
+          region ! 4
+          expectMsg(4)
+        }, max = 10.seconds)
     }
   }
 }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala
index 5927fbd495..411b6d6ee6 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala
@@ -47,9 +47,10 @@ object ConcurrentStartupShardingSpec {

     override def preStart(): Unit = {
       val region =
-        ClusterSharding(context.system).start(s"type-$n", Props.empty, ClusterShardingSettings(context.system), {
-          case msg => (msg.toString, msg)
-        }, _ => "1")
+        ClusterSharding(context.system).start(s"type-$n", Props.empty, ClusterShardingSettings(context.system),
+          {
+            case msg => (msg.toString, msg)
+          }, _ => "1")
       probe ! region
     }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala
index 2e4521445b..925a7f9f47 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala
@@ -92,16 +92,16 @@ class CoordinatedShutdownShardingSpec extends AkkaSpec(CoordinatedShutdownShardi
   // Using region 2 as it is not shutdown in either test
   def pingEntities(): Unit = {
     awaitAssert({
-      val p1 = TestProbe()(sys2)
-      region2.tell(1, p1.ref)
-      p1.expectMsg(1.seconds, 1)
-      val p2 = TestProbe()(sys2)
-      region2.tell(2, p2.ref)
-      p2.expectMsg(1.seconds, 2)
-      val p3 = TestProbe()(sys2)
-      region2.tell(3, p3.ref)
-      p3.expectMsg(1.seconds, 3)
-    }, 10.seconds)
+        val p1 = TestProbe()(sys2)
+        region2.tell(1, p1.ref)
+        p1.expectMsg(1.seconds, 1)
+        val p2 = TestProbe()(sys2)
+        region2.tell(2, p2.ref)
+        p2.expectMsg(1.seconds, 2)
+        val p3 = TestProbe()(sys2)
+        region2.tell(3, p3.ref)
+        p3.expectMsg(1.seconds, 3)
+      }, 10.seconds)
   }

   "Sharding and CoordinatedShutdown" must {
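The `ConcurrentStartupShardingSpec` hunk above only re-wraps the arguments of the classic `ClusterSharding.start` call. Spelled out with named parameters and explicit extractors, the same API looks like the sketch below; the envelope type, type name and the 100-shard modulus are illustrative assumptions:

```scala
import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }

object ShardingStartSketch {
  final case class Envelope(entityId: String, payload: Any)

  // entity id and payload are taken straight from the envelope
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case Envelope(id, payload) => (id, payload)
  }

  // hash the entity id into one of 100 shards (assumed shard count)
  val extractShardId: ShardRegion.ExtractShardId = {
    case Envelope(id, _) => (math.abs(id.hashCode) % 100).toString
  }

  def startRegion(system: ActorSystem, entityProps: Props): ActorRef =
    ClusterSharding(system).start(
      typeName = "sketch-entity",
      entityProps = entityProps,
      settings = ClusterShardingSettings(system),
      extractEntityId = extractEntityId,
      extractShardId = extractShardId)
}
```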
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala
index 0060986cf5..3f2c9d79f1 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala
@@ -97,7 +97,7 @@ class EntityTerminationSpec extends AkkaSpec(EntityTerminationSpec.config) with
       Thread.sleep(400) // restart backoff is 250 ms
       sharding ! ShardRegion.GetShardRegionState
       val regionState = expectMsgType[ShardRegion.CurrentShardRegionState]
-      regionState.shards should have size (1)
+      regionState.shards should have size 1
       regionState.shards.head.entityIds should be(Set("2"))

       // make sure the shard didn't crash (coverage for regression bug #29383)
@@ -123,11 +123,11 @@ class EntityTerminationSpec extends AkkaSpec(EntityTerminationSpec.config) with
       Thread.sleep(400) // restart backoff is 250 ms

       awaitAssert({
-        sharding ! ShardRegion.GetShardRegionState
-        val regionState = expectMsgType[ShardRegion.CurrentShardRegionState]
-        regionState.shards should have size (1)
-        regionState.shards.head.entityIds should have size (1)
-      }, 2.seconds)
+          sharding ! ShardRegion.GetShardRegionState
+          val regionState = expectMsgType[ShardRegion.CurrentShardRegionState]
+          regionState.shards should have size 1
+          regionState.shards.head.entityIds should have size 1
+        }, 2.seconds)
     }

     "allow terminating entity to passivate if remembering entities" in {
@@ -149,8 +149,8 @@ class EntityTerminationSpec extends AkkaSpec(EntityTerminationSpec.config) with

       sharding ! ShardRegion.GetShardRegionState
       val regionState = expectMsgType[ShardRegion.CurrentShardRegionState]
-      regionState.shards should have size (1)
-      regionState.shards.head.entityIds should have size (0)
+      regionState.shards should have size 1
+      regionState.shards.head.entityIds should have size 0
     }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala
index a5c60e6b6c..b3824f16da 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala
@@ -99,7 +99,8 @@ class LeastShardAllocationStrategyRandomizedSpec extends AkkaSpec("akka.loglevel
         ()
       } else if (round == maxSteps) {
         fail(
-          s"Couldn't solve rebalance in $round rounds, [${newSteps.map(step => countShardsPerRegion(step).mkString(",")).mkString(" => ")}]")
+          s"Couldn't solve rebalance in $round rounds, [${newSteps.map(step =>
+            countShardsPerRegion(step).mkString(",")).mkString(" => ")}]")
       } else {
         testRebalance(allocationStrategy, newAllocations, newSteps, maxSteps)
       }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala
index b51b7a094c..257bced59e 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala
@@ -47,8 +47,8 @@ object PersistentShardingMigrationSpec {
       akka.persistence.journal.plugin = "akka.persistence.journal.leveldb"
       akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
       akka.persistence.snapshot-store.local.dir = "target/PersistentShardingMigrationSpec-${UUID
-    .randomUUID()
-    .toString}"
+      .randomUUID()
+      .toString}"
       akka.persistence.journal.leveldb {
         native = off
         dir = "target/journal-PersistentShardingMigrationSpec-${UUID.randomUUID()}"
@@ -180,7 +180,7 @@ class PersistentShardingMigrationSpec extends AkkaSpec(PersistentShardingMigrati
     def assertRegionRegistrationComplete(region: ActorRef): Unit = {
       awaitAssert {
         region ! ShardRegion.GetCurrentRegions
-        expectMsgType[CurrentRegions].regions should have size (1)
+        expectMsgType[CurrentRegions].regions should have size 1
       }
     }
   }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala
index c1541ed5d7..fc35112615 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala
@@ -272,9 +272,9 @@ class RememberEntitiesFailureSpec

       // it takes a while - timeout hits and then backoff
       awaitAssert({
-        sharding.tell(EntityEnvelope(11, "hello-11-2"), probe.ref)
-        probe.expectMsg("hello-11-2")
-      }, 10.seconds)
+          sharding.tell(EntityEnvelope(11, "hello-11-2"), probe.ref)
+          probe.expectMsg("hello-11-2")
+        }, 10.seconds)

       system.stop(sharding)
     }
@@ -308,9 +308,9 @@ class RememberEntitiesFailureSpec

       // it takes a while - timeout hits and then backoff
       awaitAssert({
-        sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref)
-        probe.expectMsg("hello-2")
-      }, 10.seconds)
+          sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref)
+          probe.expectMsg("hello-2")
+        }, 10.seconds)

       system.stop(sharding)
     }
@@ -349,9 +349,9 @@ class RememberEntitiesFailureSpec

       // it takes a while?
       awaitAssert({
-        sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref)
-        probe.expectMsg("hello-2")
-      }, 5.seconds)
+          sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref)
+          probe.expectMsg("hello-2")
+        }, 5.seconds)

       system.stop(sharding)
     }
@@ -388,9 +388,9 @@ class RememberEntitiesFailureSpec
       storeProbe.expectMsg(Done)

       probe.awaitAssert({
-        sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref)
-        probe.expectMsg("hello-2") // should now work again
-      }, 5.seconds)
+          sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref)
+          probe.expectMsg("hello-2") // should now work again
+        }, 5.seconds)

       system.stop(sharding)
     }
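The configuration hunks in the migration specs above merely re-indent interpolated `UUID` expressions; their purpose is to give every test run a private journal and snapshot directory so state cannot leak between runs. The idea in isolation, as a hedged sketch (the helper object is invented, the plugin names are the standard LevelDB/local ones these specs use):

```scala
import java.util.UUID
import com.typesafe.config.{ Config, ConfigFactory }

object FreshPersistenceDirs {
  // Sketch: unique target directories per run keep journal/snapshot files from
  // a previous run from being replayed into the current one.
  def config(specName: String): Config = {
    val runId = UUID.randomUUID().toString
    ConfigFactory.parseString(s"""
      akka.persistence.journal.plugin = "akka.persistence.journal.leveldb"
      akka.persistence.journal.leveldb.dir = "target/journal-$specName-$runId"
      akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
      akka.persistence.snapshot-store.local.dir = "target/$specName-$runId"
      """)
  }
}
```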
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala
index 5faa8e5839..dedb3c39b0 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala
@@ -39,8 +39,8 @@ object RememberEntitiesShardIdExtractorChangeSpec {
       akka.persistence.journal.plugin = "akka.persistence.journal.leveldb"
       akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
       akka.persistence.snapshot-store.local.dir = "target/RememberEntitiesShardIdExtractorChangeSpec-${UUID
-    .randomUUID()
-    .toString}"
+      .randomUUID()
+      .toString}"
       akka.persistence.journal.leveldb {
         native = off
         dir = "target/journal-PersistentShardingMigrationSpec-${UUID.randomUUID()}"
@@ -141,7 +141,7 @@ class RememberEntitiesShardIdExtractorChangeSpec
   def assertRegionRegistrationComplete(region: ActorRef): Unit = {
     awaitAssert {
       region ! ShardRegion.GetCurrentRegions
-      expectMsgType[CurrentRegions].regions should have size (1)
+      expectMsgType[CurrentRegions].regions should have size 1
     }
   }
 }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala
index f2ab4f0acc..0715c040d5 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala
@@ -128,7 +128,7 @@ class ShardWithLeaseSpec extends AkkaSpec(ShardWithLeaseSpec.config) with WithLo
         .error(
           start = s"$typeName: Shard id [1] lease lost, stopping shard and killing [1] entities. Reason for losing lease: ${classOf[
-            BadLease].getName}: bye bye lease",
+              BadLease].getName}: bye bye lease",
           occurrences = 1)
         .intercept {
           lease.getCurrentCallback().apply(Some(BadLease("bye bye lease")))
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala
index e3dcf74c85..37b214d775 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala
@@ -106,12 +106,12 @@ class StartEntitySpec extends AkkaSpec(StartEntitySpec.config) with ImplicitSend

       // should trigger start of entity again, and an ack
       expectMsg(ShardRegion.StartEntityAck("1", "1"))
-      awaitAssert({
+      awaitAssert {
         sharding ! ShardRegion.GetShardRegionState
         val state = expectMsgType[ShardRegion.CurrentShardRegionState]
-        state.shards should have size (1)
+        state.shards should have size 1
         state.shards.head.entityIds should ===(Set("1"))
-      })
+      }
     }
   }

@@ -132,23 +132,23 @@ class StartEntitySpec extends AkkaSpec(StartEntitySpec.config) with ImplicitSend
       entity ! "just-stop"
       // Make sure the shard has processed the termination
-      awaitAssert({
+      awaitAssert {
         sharding ! ShardRegion.GetShardRegionState
         val state = expectMsgType[ShardRegion.CurrentShardRegionState]
-        state.shards should have size (1)
+        state.shards should have size 1
         state.shards.head.entityIds should ===(Set.empty[String])
-      })
+      }

       // the backoff is 10s by default, so plenty time to
       // bypass region and send start entity directly to shard
       system.actorSelection(entity.path.parent) ! ShardRegion.StartEntity("1")
       expectMsg(ShardRegion.StartEntityAck("1", "1"))
-      awaitAssert({
+      awaitAssert {
         sharding ! ShardRegion.GetShardRegionState
         val state = expectMsgType[ShardRegion.CurrentShardRegionState]
-        state.shards should have size (1)
+        state.shards should have size 1
         state.shards.head.entityIds should ===(Set("1"))
-      })
+      }
     }
   }

@@ -177,12 +177,12 @@ class StartEntitySpec extends AkkaSpec(StartEntitySpec.config) with ImplicitSend

       // regardless we should get an ack and the entity should be alive
       expectMsg(ShardRegion.StartEntityAck("1", "1"))
-      awaitAssert({
+      awaitAssert {
         sharding ! ShardRegion.GetShardRegionState
         val state = expectMsgType[ShardRegion.CurrentShardRegionState]
-        state.shards should have size (1)
+        state.shards should have size 1
         state.shards.head.entityIds should ===(Set("1"))
-      })
+      }
     }
   }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala
index 0f7ef8fcde..a9ccf48f4b 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala
@@ -34,8 +34,8 @@ object RememberEntitiesShardStoreSpec {
       akka.persistence.journal.plugin = "akka.persistence.journal.inmem"
       akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
       akka.persistence.snapshot-store.local.dir = "target/${classOf[RememberEntitiesShardStoreSpec].getName}-${UUID
-    .randomUUID()
-    .toString}"
+      .randomUUID()
+      .toString}"
       """.stripMargin)
 }
@@ -102,8 +102,8 @@ abstract class RememberEntitiesShardStoreSpec

       store ! RememberEntitiesShardStore.Update((1 to 1000).map(_.toString).toSet, (1001 to 2000).map(_.toString).toSet)
       val response = expectMsgType[RememberEntitiesShardStore.UpdateDone]
-      response.started should have size (1000)
-      response.stopped should have size (1000)
+      response.started should have size 1000
+      response.stopped should have size 1000

       watch(store)
       system.stop(store)
@@ -111,7 +111,7 @@ abstract class RememberEntitiesShardStoreSpec

       store = system.actorOf(storeProps("FakeShardIdLarge", "FakeTypeNameLarge", shardingSettings))
       store ! RememberEntitiesShardStore.GetEntities
-      expectMsgType[RememberEntitiesShardStore.RememberedEntities].entities should have size (1000)
+      expectMsgType[RememberEntitiesShardStore.RememberedEntities].entities should have size 1000
     }
   }
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala
index f71449e1ca..70e2f06d29 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala
@@ -181,13 +181,12 @@ object Simulator {
       .map(simulation.strategyCreator.preprocess) // note: mutable state in strategy creator
       .fold(immutable.Queue.empty[Access])((collected, access) =>
         if (simulation.accessPattern.isSynthetic) collected.enqueue(access) else collected)
-      .flatMapConcat(
-        collectedAccesses =>
-          if (simulation.accessPattern.isSynthetic)
-            Source(collectedAccesses) // use the exact same randomly generated accesses
-          else
-            simulation.accessPattern.entityIds.via( // re-read the access pattern
-              ShardAllocation(simulation.numberOfShards, simulation.numberOfRegions)))
+      .flatMapConcat(collectedAccesses =>
+        if (simulation.accessPattern.isSynthetic)
+          Source(collectedAccesses) // use the exact same randomly generated accesses
+        else
+          simulation.accessPattern.entityIds.via( // re-read the access pattern
+            ShardAllocation(simulation.numberOfShards, simulation.numberOfRegions)))
       .via(ShardingState(simulation.strategyCreator))
       .runWith(SimulatorStats())
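The `Simulator` hunk above re-wraps a `flatMapConcat` lambda that either replays collected accesses or re-reads the access pattern. Independent of that code, `flatMapConcat` substitutes a sub-stream for each element and concatenates the sub-streams in order; a tiny runnable sketch with invented stream contents:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }

object FlatMapConcatSketch extends App {
  implicit val system: ActorSystem = ActorSystem("flat-map-concat-sketch")

  // each odd element n expands to n copies of itself, even elements to nothing;
  // sub-streams are emitted strictly one after another, preserving order
  Source(1 to 4)
    .flatMapConcat(n => if (n % 2 == 0) Source.empty else Source(List.fill(n)(n)))
    .runWith(Sink.foreach(println)) // prints 1, 3, 3, 3
    .onComplete(_ => system.terminate())(system.dispatcher)
}
```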
diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala
index 3f02c62fe9..b0542b2cfa 100644
--- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala
+++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala
@@ -83,14 +83,16 @@ object DataTable {
       f"${stats.passivations}%,d")

   def apply(stats: RegionStats): DataTable =
-    DataTable(Headers.RegionStats, stats.shardStats.toSeq.sortBy(_._1).flatMap {
-      case (shardId, stats) => DataTable(stats).rows.map(shardId +: _)
-    })
+    DataTable(Headers.RegionStats,
+      stats.shardStats.toSeq.sortBy(_._1).flatMap {
+        case (shardId, stats) => DataTable(stats).rows.map(shardId +: _)
+      })

   def apply(stats: ShardingStats): DataTable =
-    DataTable(Headers.ShardingStats, stats.regionStats.toSeq.sortBy(_._1).flatMap {
-      case (regionId, stats) => DataTable(stats).rows.map(regionId +: _)
-    })
+    DataTable(Headers.ShardingStats,
+      stats.regionStats.toSeq.sortBy(_._1).flatMap {
+        case (regionId, stats) => DataTable(stats).rows.map(regionId +: _)
+      })
 }

 object PrintData {
@@ -133,7 +135,7 @@ object PrintData {
     columnWidths.map(width => line * (width + 2)).mkString(start, separator, end) + "\n"

   private def line(row: DataTable.Row, columnWidths: Seq[Int]): String =
-    row.zip(columnWidths).map({ case (cell, width) => pad(cell, width) }).mkString("║ ", " │ ", " ║") + "\n"
+    row.zip(columnWidths).map { case (cell, width) => pad(cell, width) }.mkString("║ ", " │ ", " ║") + "\n"

   private def pad(string: String, width: Int): String = " " * (width - string.length) + string
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
index e086ae2e79..a8eba44dc7 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala
@@ -940,7 +940,6 @@ object ClusterReceptionist {
  * since the client should normally send subsequent messages via the `ClusterClient`.
  * It is possible to pass the original sender inside the reply messages if
  * the client is supposed to communicate directly to the actor in the cluster.
- *
  */
 @deprecated(
   "Use Akka gRPC instead, see https://doc.akka.io/docs/akka/2.6/cluster-client.html#migration-to-akka-grpc",
@@ -1071,7 +1070,6 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep
       }

     case _: MemberEvent => // not of interest
-
     case SubscribeClusterClients =>
       val subscriber = sender()
       subscriber ! ClusterClients(clientInteractions.keySet.to(HashSet))
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
index fe416a60b1..e658bdce99 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala
@@ -577,7 +577,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings)

   val removedTimeToLiveMillis = removedTimeToLive.toMillis

-  //Start periodic gossip to random nodes in cluster
+  // Start periodic gossip to random nodes in cluster
   import context.dispatcher
   val gossipTask = context.system.scheduler.scheduleWithFixedDelay(gossipInterval, gossipInterval, self, GossipTick)
   val pruneInterval: FiniteDuration = removedTimeToLive / 2
@@ -774,7 +774,6 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings)
       }

     case _: MemberEvent => // not of interest
-
     case Count =>
       val count = registry.map {
         case (_, bucket) =>
@@ -862,7 +861,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings)

   def mkKey(path: ActorPath): String = Internal.mkKey(path)

-  def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) => (owner -> bucket.version) }
+  def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) => owner -> bucket.version }

   def collectDelta(otherVersions: Map[Address, Long]): immutable.Iterable[Bucket] = {
     // missing entries are represented by version 0
@@ -908,7 +907,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings)
     registry.foreach {
       case (owner, bucket) =>
         val oldRemoved = bucket.content.collect {
-          case (key, ValueHolder(version, None)) if (bucket.version - version > removedTimeToLiveMillis) => key
+          case (key, ValueHolder(version, None)) if bucket.version - version > removedTimeToLiveMillis => key
         }
         if (oldRemoved.nonEmpty)
           registry += owner -> bucket.copy(content = bucket.content -- oldRemoved)
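The last mediator hunk only drops redundant parentheses in the guard, but the surrounding idiom is worth seeing whole: deleted entries are kept as `None` tombstones and only physically removed once older than a TTL. A generic hedged sketch of that pruning step; the types mirror the names in the hunk but are re-declared here just for the sketch:

```scala
object TombstonePruneSketch {
  final case class ValueHolder(version: Long, ref: Option[String])
  final case class Bucket(version: Long, content: Map[String, ValueHolder])

  // Sketch: collect the keys of tombstones (None values) older than the TTL,
  // measured against the bucket's own version counter, and drop them in one pass.
  def prune(bucket: Bucket, removedTimeToLiveMillis: Long): Bucket = {
    val oldRemoved = bucket.content.collect {
      case (key, ValueHolder(version, None)) if bucket.version - version > removedTimeToLiveMillis => key
    }
    if (oldRemoved.isEmpty) bucket
    else bucket.copy(content = bucket.content -- oldRemoved)
  }
}
```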
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
index 9a4ce686df..087d0b8751 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala
@@ -48,8 +48,8 @@ object ClusterSingletonManagerSettings {
    */
   def apply(system: ActorSystem): ClusterSingletonManagerSettings =
     apply(system.settings.config.getConfig("akka.cluster.singleton"))
-    // note that this setting has some additional logic inside the ClusterSingletonManager
-    // falling back to DowningProvider.downRemovalMargin if it is off/Zero
+      // note that this setting has some additional logic inside the ClusterSingletonManager
+      // falling back to DowningProvider.downRemovalMargin if it is off/Zero
       .withRemovalMargin(Cluster(system).settings.DownRemovalMargin)

   /**
@@ -464,7 +464,6 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess
  *
  * Not intended for subclassing by user code.
  *
- *
  * @param singletonProps [[akka.actor.Props]] of the singleton actor instance.
 *
  * @param terminationMessage When handing over to a new oldest node
@@ -498,10 +497,9 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se

   override val log: MarkerLoggingAdapter = Logging.withMarker(context.system, this)

-  val lease: Option[Lease] = settings.leaseSettings.map(
-    settings =>
-      LeaseProvider(context.system)
-        .getLease(singletonLeaseName, settings.leaseImplementation, cluster.selfAddress.hostPort))
+  val lease: Option[Lease] = settings.leaseSettings.map(settings =>
+    LeaseProvider(context.system)
+      .getLease(singletonLeaseName, settings.leaseImplementation, cluster.selfAddress.hostPort))
   val leaseRetryInterval: FiniteDuration = settings.leaseSettings match {
     case Some(s) => s.leaseRetryInterval
     case None    => 5.seconds // won't be used
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
index b136b096d0..46bfcb56a5 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala
@@ -272,7 +272,6 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste
       else remove(m)

     case _: MemberEvent => // do nothing
-
     // singleton identification logic
     case ActorIdentity(_, Some(s)) =>
       // if the new singleton is defined, deliver all buffered messages
diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala
index 1a2ebc6346..8d78354ca7 100644
--- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala
+++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala
@@ -30,15 +30,19 @@ private[akka] class ClusterSingletonMessageSerializer(val system: ExtendedActorS

   private val emptyByteArray = Array.empty[Byte]

-  private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](HandOverToMeManifest -> { _ =>
-    HandOverToMe
-  }, HandOverInProgressManifest -> { _ =>
-    HandOverInProgress
-  }, HandOverDoneManifest -> { _ =>
-    HandOverDone
-  }, TakeOverFromMeManifest -> { _ =>
-    TakeOverFromMe
-  })
+  private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](
+    HandOverToMeManifest -> { _ =>
+      HandOverToMe
+    },
+    HandOverInProgressManifest -> { _ =>
+      HandOverInProgress
+    },
+    HandOverDoneManifest -> { _ =>
+      HandOverDone
+    },
+    TakeOverFromMeManifest -> { _ =>
+      TakeOverFromMe
+    })

   override def manifest(obj: AnyRef): String = obj match {
     case HandOverToMe => HandOverToMeManifest
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
index 93e2461aea..902f688fa5 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala
@@ -74,7 +74,7 @@ object ClusterClientSpec extends MultiNodeConfig {
     }
   }

-  //#clientEventsListener
+  // #clientEventsListener
   class ClientListener(targetClient: ActorRef) extends Actor {
     override def preStart(): Unit = targetClient ! SubscribeContactPoints
@@ -94,7 +94,7 @@ object ClusterClientSpec extends MultiNodeConfig {
       // Now do something with an up-to-date "contactPoints - cp"
     }
   }
-  //#clientEventsListener
+  // #clientEventsListener

   object TestClientListener {
     case object GetLatestContactPoints
@@ -113,7 +113,7 @@ object ClusterClientSpec extends MultiNodeConfig {
     }
   }

-  //#receptionistEventsListener
+  // #receptionistEventsListener
   class ReceptionistListener(targetReceptionist: ActorRef) extends Actor {
     override def preStart(): Unit = targetReceptionist ! SubscribeClusterClients
@@ -133,7 +133,7 @@ object ClusterClientSpec extends MultiNodeConfig {
       // Now do something with an up-to-date "clusterClients - c"
     }
   }
-  //#receptionistEventsListener
+  // #receptionistEventsListener

   object TestReceptionistListener {
     case object GetLatestClusterClients
@@ -192,13 +192,13 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
   }

   @unused
-  def docOnly = { //not used, only demo
-    //#initialContacts
+  def docOnly = { // not used, only demo
+    // #initialContacts
     val initialContacts = Set(
       ActorPath.fromString("akka://OtherSys@host1:2552/system/receptionist"),
       ActorPath.fromString("akka://OtherSys@host2:2552/system/receptionist"))
     val settings = ClusterClientSettings(system).withInitialContacts(initialContacts)
-    //#initialContacts
+    // #initialContacts

     // make the compiler happy and thinking we use it
     settings.acceptableHeartbeatPause
@@ -261,7 +261,7 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
       def host2 = second
       def host3 = third

      //#server
+      // #server
       runOn(host1) {
         val serviceA = system.actorOf(Props[Service](), "serviceA")
         ClusterClientReceptionist(system).registerService(serviceA)
@@ -271,14 +271,14 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
         val serviceB = system.actorOf(Props[Service](), "serviceB")
         ClusterClientReceptionist(system).registerService(serviceB)
       }
-      //#server
+      // #server

       runOn(host1, host2, host3, fourth) {
         awaitCount(4)
       }
       enterBarrier("services-replicated")

-      //#client
+      // #client
       runOn(client) {
         val c = system.actorOf(
           ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
@@ -286,7 +286,7 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
         c ! ClusterClient.Send("/user/serviceA", "hello", localAffinity = true)
         c ! ClusterClient.SendToAll("/user/serviceB", "hi")
       }
-      //#client
+      // #client

       runOn(client) {
         // note that "hi" was sent to 2 "serviceB"
@@ -306,9 +306,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
         val expectedContacts = Set(first, second, third, fourth).map(node(_) / "system" / "receptionist")
         awaitAssert({
-          listener ! TestClientListener.GetLatestContactPoints
-          expectMsgType[LatestContactPoints].contactPoints should ===(expectedContacts)
-        }, max = 10.seconds)
+            listener ! TestClientListener.GetLatestContactPoints
+            expectMsgType[LatestContactPoints].contactPoints should ===(expectedContacts)
+          }, max = 10.seconds)
       }

       enterBarrier("reporter-client-listener-tested")
@@ -327,11 +327,11 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod
         val expectedClient =
           Await.result(system.actorSelection(node(client) / "user" / "client").resolveOne(), timeout.duration)
         awaitAssert({
-          val probe = TestProbe()
-          l.tell(TestReceptionistListener.GetLatestClusterClients, probe.ref)
-          // "ask-client" might still be around, filter
-          probe.expectMsgType[LatestClusterClients].clusterClients should contain(expectedClient)
-        }, max = 10.seconds)
+            val probe = TestProbe()
+            l.tell(TestReceptionistListener.GetLatestClusterClients, probe.ref)
+            // "ask-client" might still be around, filter
+            probe.expectMsgType[LatestClusterClients].clusterClients should contain(expectedClient)
+          }, max = 10.seconds)
       }
     }
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala
index 9335de1ed7..09dd273871 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala
@@ -63,7 +63,7 @@ object DistributedPubSubMediatorSpec extends MultiNodeConfig {
     }
   }

-  //#publisher
+  // #publisher
   class Publisher extends Actor {
     import DistributedPubSubMediator.Publish
     // activate the extension
@@ -75,9 +75,9 @@ object DistributedPubSubMediatorSpec extends MultiNodeConfig {
         mediator ! Publish("content", out)
     }
   }
-  //#publisher
+  // #publisher

-  //#subscriber
+  // #subscriber
   class Subscriber extends Actor with ActorLogging {
     import DistributedPubSubMediator.{ Subscribe, SubscribeAck }
     val mediator = DistributedPubSub(context.system).mediator
@@ -91,9 +91,9 @@ object DistributedPubSubMediatorSpec extends MultiNodeConfig {
         log.info("subscribing")
     }
   }
-  //#subscriber
+  // #subscriber

-  //#sender
+  // #sender
   class Sender extends Actor {
     import DistributedPubSubMediator.Send
     // activate the extension
@@ -105,9 +105,9 @@ object DistributedPubSubMediatorSpec extends MultiNodeConfig {
         mediator ! Send(path = "/user/destination", msg = out, localAffinity = true)
     }
   }
-  //#sender
+  // #sender

-  //#send-destination
+  // #send-destination
   class Destination extends Actor with ActorLogging {
     import DistributedPubSubMediator.Put
     val mediator = DistributedPubSub(context.system).mediator
@@ -119,7 +119,7 @@ object DistributedPubSubMediatorSpec extends MultiNodeConfig {
         log.info("Got {}", s)
     }
   }
-  //#send-destination
+  // #send-destination
 }

@@ -347,7 +347,7 @@ class DistributedPubSubMediatorSpec
         awaitCount(10)
       }

-      //#start-subscribers
+      // #start-subscribers
       runOn(first) {
         system.actorOf(Props[Subscriber](), "subscriber1")
       }
@@ -355,16 +355,16 @@ class DistributedPubSubMediatorSpec
         system.actorOf(Props[Subscriber](), "subscriber2")
         system.actorOf(Props[Subscriber](), "subscriber3")
       }
-      //#start-subscribers
+      // #start-subscribers

-      //#publish-message
+      // #publish-message
       runOn(third) {
         val publisher = system.actorOf(Props[Publisher](), "publisher")
         later()
         // after a while the subscriptions are replicated
         publisher ! "hello"
       }
-      //#publish-message
+      // #publish-message

       enterBarrier("after-8")
     }
@@ -374,23 +374,23 @@ class DistributedPubSubMediatorSpec
         awaitCount(12)
       }

-      //#start-send-destinations
+      // #start-send-destinations
       runOn(first) {
         system.actorOf(Props[Destination](), "destination")
       }
       runOn(second) {
         system.actorOf(Props[Destination](), "destination")
       }
-      //#start-send-destinations
+      // #start-send-destinations

-      //#send-message
+      // #send-message
       runOn(third) {
         val sender = system.actorOf(Props[Sender](), "sender")
         later()
         // after a while the destinations are replicated
         sender ! "hello"
       }
-      //#send-message
+      // #send-message

       enterBarrier("after-8")
     }
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala
index 1b2cf7e5b6..3bd40b69bc 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala
@@ -177,24 +177,24 @@ class ClusterSingletonManagerLeaseSpec
     runOn(controller) {
       cluster.down(address(first))
       awaitAssert({
-        cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up)
-      }, 20.seconds)
+          cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up)
+        }, 20.seconds)
       val requests = awaitAssert({
-        TestLeaseActorClientExt(system).getLeaseActor() ! GetRequests
-        val msg = expectMsgType[LeaseRequests]
-        withClue("Requests: " + msg) {
-          msg.requests.size shouldEqual 2
-        }
-        msg
-      }, 10.seconds)
+          TestLeaseActorClientExt(system).getLeaseActor() ! GetRequests
+          val msg = expectMsgType[LeaseRequests]
+          withClue("Requests: " + msg) {
+            msg.requests.size shouldEqual 2
+          }
+          msg
+        }, 10.seconds)
       requests.requests should contain(Release(address(first).hostPort))
       requests.requests should contain(Acquire(address(second).hostPort))
     }
     runOn(second, third, fourth) {
       awaitAssert({
-        cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up)
-      }, 20.seconds)
+          cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up)
+        }, 20.seconds)
     }
     enterBarrier("first node downed")
     val proxy = system.actorOf(
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala
index 9e604907e1..a8a5a668d3 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala
@@ -190,7 +190,7 @@ class ClusterSingletonManagerLeave2Spec
       p.within(15.seconds) {
         p.awaitAssert {
           echoProxy.tell("hello2", p.ref)
-          p.expectMsgType[ActorRef](1.seconds).path.address should not be (firstAddress)
+          p.expectMsgType[ActorRef](1.seconds).path.address should not be firstAddress
         }
       }
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
index 02ed187bc7..72f1dd9549 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala
@@ -149,7 +149,7 @@ class ClusterSingletonManagerLeaveSpec
       p.within(15.seconds) {
         p.awaitAssert {
           echoProxy.tell("hello2", p.ref)
-          p.expectMsgType[ActorRef](1.seconds).path.address should not be (firstAddress)
+          p.expectMsgType[ActorRef](1.seconds).path.address should not be firstAddress
         }
       }
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala
index 1b54ab13ca..b8bb100517 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala
@@ -111,10 +111,10 @@ class ClusterSingletonManagerPreparingForShutdownSpec
         Cluster(system).prepareForFullClusterShutdown()
       }
       awaitAssert({
-        withClue("members: " + Cluster(system).readView.members) {
-          Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown
-        }
-      }, 10.seconds)
+          withClue("members: " + Cluster(system).readView.members) {
+            Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown
+          }
+        }, 10.seconds)
       enterBarrier("preparation-complete")

       runOn(first) {
@@ -153,10 +153,10 @@ class ClusterSingletonManagerPreparingForShutdownSpec
         Cluster(system).leave(address(second))
       }
       awaitAssert({
-        withClue("self member: " + Cluster(system).selfMember) {
-          Cluster(system).selfMember.status shouldEqual Removed
-        }
-      }, 10.seconds)
+          withClue("self member: " + Cluster(system).selfMember) {
+            Cluster(system).selfMember.status shouldEqual Removed
+          }
+        }, 10.seconds)
       enterBarrier("done")
     }
diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
index bdcffa4eeb..25a48bacab 100644
--- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
+++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala
@@ -48,10 +48,10 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {

   nodeConfig(first, second, third, fourth, fifth, sixth)(ConfigFactory.parseString("akka.cluster.roles =[worker]"))

-  //#singleton-message-classes
+  // #singleton-message-classes
   object PointToPointChannel {
     case object UnregistrationOk extends CborSerializable
-    //#singleton-message-classes
+    // #singleton-message-classes
     case object RegisterConsumer extends CborSerializable
     case object UnregisterConsumer extends CborSerializable
     case object RegistrationOk extends CborSerializable
@@ -59,9 +59,9 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
     case object UnexpectedUnregistration extends CborSerializable
     case object Reset extends CborSerializable
     case object ResetOk extends CborSerializable
-    //#singleton-message-classes
+    // #singleton-message-classes
   }
-  //#singleton-message-classes
+  // #singleton-message-classes

   /**
    * This channel is extremely strict with regards to
@@ -108,14 +108,14 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
     }
   }

-  //#singleton-message-classes
+  // #singleton-message-classes
   object Consumer {
     case object End extends CborSerializable
     case object GetCurrent extends CborSerializable
     case object Ping extends CborSerializable
     case object Pong extends CborSerializable
   }
-  //#singleton-message-classes
+  // #singleton-message-classes

   /**
    * The Singleton actor
@@ -145,7 +145,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
       delegateTo ! message
     case GetCurrent =>
       sender() ! current
-    //#consumer-end
+    // #consumer-end
     case End =>
       queue ! UnregisterConsumer
     case UnregistrationOk =>
@@ -153,7 +153,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
       context.stop(self)
     case Ping =>
      sender() ! Pong
-    //#consumer-end
+    // #consumer-end
   }
 }

@@ -218,35 +218,35 @@ class ClusterSingletonManagerSpec
   }

   def createSingleton(): ActorRef = {
-    //#create-singleton-manager
+    // #create-singleton-manager
     system.actorOf(
       ClusterSingletonManager.props(
         singletonProps = Props(classOf[Consumer], queue, testActor),
         terminationMessage = End,
         settings = ClusterSingletonManagerSettings(system).withRole("worker")),
       name = "consumer")
-    //#create-singleton-manager
+    // #create-singleton-manager
   }

   def createSingletonProxy(): ActorRef = {
-    //#create-singleton-proxy
+    // #create-singleton-proxy
     val proxy = system.actorOf(
       ClusterSingletonProxy.props(
         singletonManagerPath = "/user/consumer",
         settings = ClusterSingletonProxySettings(system).withRole("worker")),
       name = "consumerProxy")
-    //#create-singleton-proxy
+    // #create-singleton-proxy
     proxy
   }

   def createSingletonProxyDc(): ActorRef = {
-    //#create-singleton-proxy-dc
+    // #create-singleton-proxy-dc
     val proxyDcB = system.actorOf(
       ClusterSingletonProxy.props(
         singletonManagerPath = "/user/consumer",
         settings = ClusterSingletonProxySettings(system).withDataCenter("B")),
       name = "consumerProxyDcB")
-    //#create-singleton-proxy-dc
+    // #create-singleton-proxy-dc
     proxyDcB
   }
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala
index c5db8da0e4..4390b4fd7b 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala
@@ -48,8 +48,8 @@ import akka.util.Timeout

   def withState(
       subscribeAdapters: Map[
-        ActorRef[JReplicator.SubscribeResponse[ReplicatedData]],
-        ActorRef[dd.Replicator.SubscribeResponse[ReplicatedData]]]): Behavior[SReplicator.Command] = {
+        ActorRef[JReplicator.SubscribeResponse[ReplicatedData]], ActorRef[dd.Replicator.SubscribeResponse[
+          ReplicatedData]]]): Behavior[SReplicator.Command] = {

     def stopSubscribeAdapter(
         subscriber: ActorRef[JReplicator.SubscribeResponse[ReplicatedData]]): Behavior[SReplicator.Command] = {
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala
index 188e55aa6c..93fa455aae 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala
@@ -10,8 +10,8 @@ import akka.actor.typed._
 import akka.annotation.{ DoNotInherit, InternalApi }
 import akka.cluster.ClusterSettings.DataCenter
 import akka.cluster.singleton.{
-  ClusterSingletonProxySettings,
-  ClusterSingletonManagerSettings => ClassicClusterSingletonManagerSettings
+  ClusterSingletonManagerSettings => ClassicClusterSingletonManagerSettings,
+  ClusterSingletonProxySettings
 }
 import akka.cluster.typed.internal.AdaptedClusterSingletonImpl
 import akka.coordination.lease.LeaseUsageSettings
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala
index ae20315b01..510317a86a 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala
@@ -14,7 +14,7 @@ import akka.actor.typed.internal.adapter.ActorSystemAdapter
 import akka.actor.typed.scaladsl.Behaviors
 import akka.annotation.InternalApi
 import akka.cluster.ClusterSettings.DataCenter
-import akka.cluster.singleton.{ ClusterSingletonProxy, ClusterSingletonManager => OldSingletonManager }
+import akka.cluster.singleton.{ ClusterSingletonManager => OldSingletonManager, ClusterSingletonProxy }
 import akka.cluster.typed
 import akka.cluster.typed.{ Cluster, ClusterSingleton, ClusterSingletonImpl, ClusterSingletonSettings }
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala
index ecb7b5160b..739e27695b 100644
--- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala
+++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala
@@ -31,9 +31,6 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey,
  *
  * Two level structure for keeping service registry to be able to shard entries over multiple ddata keys (to not
  * get too large ddata messages)
- *
-
- *
  */
 @InternalApi
 private[akka] final case class ShardedServiceRegistry(
     serviceRegistries: Map[DDataKey, ServiceRegistry],
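The receptionist registry internals above, and the `ClusterReceptionistUnreachabilitySpec` changes further below, hinge on the difference between `Listing.serviceInstances` (instances on reachable nodes only) and `Listing.allServiceInstances` (including unreachable ones). A hedged subscriber sketch; the key name and log message are illustrative:

```scala
import akka.actor.typed.Behavior
import akka.actor.typed.receptionist.{ Receptionist, ServiceKey }
import akka.actor.typed.scaladsl.Behaviors

object ListingWatcherSketch {
  val MyServiceKey: ServiceKey[String] = ServiceKey[String]("my-service")

  // Sketch: subscribe to the receptionist and compare the reachable view with
  // the full view on every listing update.
  def apply(): Behavior[Receptionist.Listing] = Behaviors.setup { ctx =>
    ctx.system.receptionist ! Receptionist.Subscribe(MyServiceKey, ctx.self)
    Behaviors.receiveMessage { listing =>
      val reachable = listing.serviceInstances(MyServiceKey)
      val all = listing.allServiceInstances(MyServiceKey)
      ctx.log.info("{} of {} registered instances are on reachable nodes", reachable.size, all.size)
      Behaviors.same
    }
  }
}
```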
diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala
index 51dadc3eeb..82263bda2a 100644
--- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala
+++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala
@@ -66,10 +66,10 @@ abstract class PubSubSpec extends MultiNodeSpec(PubSubSpecConfig) with MultiNode

     "see nodes with subscribers registered" in {
       val statsProbe = TestProbe[Topic.TopicStats]()
-      statsProbe.awaitAssert({
+      statsProbe.awaitAssert {
         topic ! Topic.GetTopicStats[Message](statsProbe.ref)
         statsProbe.receiveMessage().topicInstanceCount should ===(3)
-      })
+      }
       enterBarrier("topic instances with subscribers seen")
     }

@@ -91,10 +91,10 @@ abstract class PubSubSpec extends MultiNodeSpec(PubSubSpecConfig) with MultiNode
         topic ! Topic.Unsubscribe(topicProbe.ref)
         // unsubscribe does not need to be gossiped before it is effective
         val statsProbe = TestProbe[Topic.TopicStats]()
-        statsProbe.awaitAssert({
+        statsProbe.awaitAssert {
           topic ! Topic.GetTopicStats[Message](statsProbe.ref)
           statsProbe.receiveMessage().topicInstanceCount should ===(2)
-        })
+        }
       }
       enterBarrier("unsubscribed")
       Thread.sleep(200) // but it needs to reach the topic
diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala
index 6afbd987e8..89522d0058 100644
--- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala
+++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala
@@ -68,21 +68,21 @@ abstract class ClusterReceptionistUnreachabilitySpec

     "register a service" in {
       val localServiceRef = spawn(Behaviors.receiveMessage[String] {
-        case msg =>
-          probe.ref ! msg
-          Behaviors.same
-      }, "my-service")
+          case msg =>
+            probe.ref ! msg
+            Behaviors.same
+        }, "my-service")
       typedSystem.receptionist ! Receptionist.Register(MyServiceKey, localServiceRef)
       enterBarrier("all registered")
     }

     "see registered services" in {
       awaitAssert({
-        val listing = receptionistProbe.expectMessageType[Receptionist.Listing]
-        listing.serviceInstances(MyServiceKey) should have size (3)
-        listing.allServiceInstances(MyServiceKey) should have size (3)
-        listing.servicesWereAddedOrRemoved should ===(true)
-      }, 20.seconds)
+          val listing = receptionistProbe.expectMessageType[Receptionist.Listing]
+          listing.serviceInstances(MyServiceKey) should have size 3
+          listing.allServiceInstances(MyServiceKey) should have size 3
+          listing.servicesWereAddedOrRemoved should ===(true)
+        }, 20.seconds)
       enterBarrier("all seen registered")
     }

@@ -97,20 +97,20 @@ abstract class ClusterReceptionistUnreachabilitySpec
       runOn(first, third) {
         // assert service on 2 is not in listing but in all and flag is false
         awaitAssert({
-          val listing = receptionistProbe.expectMessageType[Receptionist.Listing]
-          listing.serviceInstances(MyServiceKey) should have size (2)
-          listing.allServiceInstances(MyServiceKey) should have size (3)
-          listing.servicesWereAddedOrRemoved should ===(false)
-        }, 20.seconds)
+            val listing = receptionistProbe.expectMessageType[Receptionist.Listing]
+            listing.serviceInstances(MyServiceKey) should have size 2
+            listing.allServiceInstances(MyServiceKey) should have size 3
+            listing.servicesWereAddedOrRemoved should ===(false)
+          }, 20.seconds)
       }
       runOn(second) {
         // assert service on 1 and 3 is not in listing but in all and flag is false
         awaitAssert({
-          val listing = receptionistProbe.expectMessageType[Receptionist.Listing]
-          listing.serviceInstances(MyServiceKey) should have size (1)
-          listing.allServiceInstances(MyServiceKey) should have size (3)
-          listing.servicesWereAddedOrRemoved should ===(false)
-        }, 20.seconds)
+            val listing = receptionistProbe.expectMessageType[Receptionist.Listing]
+            listing.serviceInstances(MyServiceKey) should have size 1
+            listing.allServiceInstances(MyServiceKey) should have size 3
+            listing.servicesWereAddedOrRemoved should ===(false)
+          }, 20.seconds)
       }
       enterBarrier("all seen unreachable")
     }
@@ -122,12 +122,12 @@ abstract class ClusterReceptionistUnreachabilitySpec
         testConductor.passThrough(third, second, Direction.Both).await
       }

-      awaitAssert({
+      awaitAssert {
         val listing = receptionistProbe.expectMessageType[Receptionist.Listing]
-        listing.serviceInstances(MyServiceKey) should have size (3)
-        listing.allServiceInstances(MyServiceKey) should have size (3)
+        listing.serviceInstances(MyServiceKey) should have size 3
+        listing.allServiceInstances(MyServiceKey) should have size 3
         listing.servicesWereAddedOrRemoved should ===(false)
-      })
+      }
       enterBarrier("all seen reachable-again")
     }
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala b/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala
index 20b9faf979..1143b4b685 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala
@@ -50,7 +50,7 @@ object ReplicatorCompileOnlyTest {
     val replyTo: ActorRef[Int] = ???
     val key = GCounterKey("counter")

-    //#curried-update
+    // #curried-update
     // alternative way to define the `createRequest` function
     // Replicator.Update instance has a curried `apply` method
     replicatorAdapter.askUpdate(
@@ -61,9 +61,9 @@ object ReplicatorCompileOnlyTest {
     replicatorAdapter.askUpdate(
       askReplyTo => Replicator.Update(key, GCounter.empty, Replicator.WriteLocal, askReplyTo)(_ :+ 1),
       InternalUpdateResponse.apply)
-    //#curried-update
+    // #curried-update

-    //#curried-get
+    // #curried-get
     // alternative way to define the `createRequest` function
     // Replicator.Get instance has a curried `apply` method
     replicatorAdapter.askGet(Replicator.Get(key, Replicator.ReadLocal), value => InternalGetResponse(value, replyTo))
@@ -72,7 +72,7 @@ object ReplicatorCompileOnlyTest {
     replicatorAdapter.askGet(
       askReplyTo => Replicator.Get(key, Replicator.ReadLocal, askReplyTo),
       value => InternalGetResponse(value, replyTo))
-    //#curried-get
+    // #curried-get
   }

   def shouldHaveUnapplyForResponseTypes(): Unit = {
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala
index 67d4b1b8bf..d61aa1f8ef 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala
@@ -110,10 +110,11 @@ class ActorSystemSpec

   "An ActorSystem" must {
     "start the guardian actor and terminate when it terminates" in {
-      withSystem("a", Behaviors.receiveMessage[Probe] { p =>
-        p.replyTo ! p.message
-        Behaviors.stopped
-      }, doTerminate = false) { sys =>
+      withSystem("a",
+        Behaviors.receiveMessage[Probe] { p =>
+          p.replyTo ! p.message
+          Behaviors.stopped
+        }, doTerminate = false) { sys =>
         val inbox = TestInbox[String]("a")
         sys ! Probe("hello", inbox.ref)
         eventually {
@@ -140,13 +141,13 @@ class ActorSystemSpec
     "terminate the guardian actor" in {
       val inbox = TestInbox[String]("terminate")
       val sys = system(Behaviors.setup[Any] { _ =>
-        inbox.ref ! "started"
-        Behaviors.receiveSignal {
-          case (_, PostStop) =>
-            inbox.ref ! "done"
-            Behaviors.same
-        }
-      }, "terminate")
+          inbox.ref ! "started"
+          Behaviors.receiveSignal {
+            case (_, PostStop) =>
+              inbox.ref ! "done"
+              Behaviors.same
+          }
+        }, "terminate")

       eventually {
         inbox.hasMessages should ===(true)
@@ -162,8 +163,8 @@ class ActorSystemSpec

     "be able to terminate immediately" in {
       val sys = system(Behaviors.receiveMessage[Probe] { _ =>
-        Behaviors.unhandled
-      }, "terminate")
+          Behaviors.unhandled
+        }, "terminate")
       // for this case the guardian might not have been started before
       // the system terminates and then it will not receive PostStop, which
       // is OK since it wasn't really started yet
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala
index 0f1a2895c6..4fdcc6f184 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala
@@ -97,14 +97,14 @@ class ClusterSingletonApiSpec
       val node2PongProbe = TestProbe[Pong.type]()(adaptedSystem2)

       node1PongProbe.awaitAssert({
-        node1ref ! Ping(node1PongProbe.ref)
-        node1PongProbe.expectMessage(Pong)
-      }, 3.seconds)
+          node1ref ! Ping(node1PongProbe.ref)
+          node1PongProbe.expectMessage(Pong)
+        }, 3.seconds)

       node2PongProbe.awaitAssert({
-        node2ref ! Ping(node2PongProbe.ref)
-        node2PongProbe.expectMessage(Pong)
-      }, 3.seconds)
+          node2ref ! Ping(node2PongProbe.ref)
+          node2PongProbe.expectMessage(Pong)
+        }, 3.seconds)
     }
   }
diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala
index ea41169596..b33e31c4a3 100644
--- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala
+++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala
@@ -96,33 +96,33 @@ class GroupRouterSpec extends ScalaTestWithActorTestKit(GroupRouterSpec.config)
       val resultProbe = testKit.createTestProbe[Pinger.DonePinging]()

       val system1 = ActorSystem(Behaviors.setup[Receptionist.Listing] {
-        ctx =>
-          (0 until settings.node1WorkerCount).foreach { i =>
+          ctx =>
+            (0 until settings.node1WorkerCount).foreach { i =>
+              val worker = ctx.spawn(PingActor(), s"ping-pong-$i")
+              ctx.system.receptionist ! Receptionist.Register(pingPongKey, worker)
+            }
+            ctx.system.receptionist ! Receptionist.Subscribe(pingPongKey, ctx.self)
+            Behaviors.receiveMessage {
+              case pingPongKey.Listing(update) if update.size == settings.node1WorkerCount + settings.node2WorkerCount =>
+                // the requested number of workers are started and registered with the receptionist
+                // a new router will see all after this has been observed
+                ctx.log.debug("Saw {} workers, starting router and pinger", update.size)
+                val router = ctx.spawn(groupRouter, "group-router")
+                ctx.spawn(Pinger(router, settings.messageCount, resultProbe.ref), "pinger")
+                // ignore further listings
+                Behaviors.empty
+              case _ =>
+                Behaviors.same
+            }
+        }, system.name, config)
+
+      val system2 = ActorSystem(Behaviors.setup[Unit] { ctx =>
+        (0 until settings.node2WorkerCount).foreach { i =>
           val worker = ctx.spawn(PingActor(), s"ping-pong-$i")
           ctx.system.receptionist ! Receptionist.Register(pingPongKey, worker)
         }
-        ctx.system.receptionist !
Receptionist.Subscribe(pingPongKey, ctx.self) - Behaviors.receiveMessage { - case pingPongKey.Listing(update) if update.size == settings.node1WorkerCount + settings.node2WorkerCount => - // the requested number of workers are started and registered with the receptionist - // a new router will see all after this has been observed - ctx.log.debug("Saw {} workers, starting router and pinger", update.size) - val router = ctx.spawn(groupRouter, "group-router") - ctx.spawn(Pinger(router, settings.messageCount, resultProbe.ref), "pinger") - // ignore further listings - Behaviors.empty - case _ => - Behaviors.same - } - }, system.name, config) - - val system2 = ActorSystem(Behaviors.setup[Unit] { ctx => - (0 until settings.node2WorkerCount).foreach { i => - val worker = ctx.spawn(PingActor(), s"ping-pong-$i") - ctx.system.receptionist ! Receptionist.Register(pingPongKey, worker) - } - Behaviors.empty - }, system.name, config) + Behaviors.empty + }, system.name, config) try { val node1 = Cluster(system1) diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala index cbd75ed20d..0495a5106f 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala @@ -63,8 +63,8 @@ class RemoteDeployNotAllowedSpec // this should throw try { ctx.spawn(Behaviors.setup[AnyRef] { _ => - Behaviors.empty - }, name) + Behaviors.empty + }, name) } catch { case ex: Exception => probe.ref ! ex } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala index 1ff392de6d..efff4d9792 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala @@ -68,9 +68,9 @@ class RemoteMessageSpec extends AkkaSpec(RemoteMessageSpec.config) { val pongPromise = Promise[Done]() val recipient = system2.spawn(Behaviors.receive[String] { (_, _) => - pongPromise.success(Done) - Behaviors.stopped - }, "recipient") + pongPromise.success(Done) + Behaviors.stopped + }, "recipient") remoteRef ! 
Ping(recipient) pingPromise.future.futureValue should ===(Done) diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala index 9cfd260e17..2d0a674d94 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala @@ -203,9 +203,9 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin } regProbe1.awaitAssert({ - // we will also potentially get an update that the service was unreachable before the expected one - regProbe1.expectMessage(10.seconds, Listing(PingKey, Set(service1))) - }, 10.seconds) + // we will also potentially get an update that the service was unreachable before the expected one + regProbe1.expectMessage(10.seconds, Listing(PingKey, Set(service1))) + }, 10.seconds) // register another after removal val service1b = testKit1.spawn(pingPongBehavior) @@ -258,9 +258,9 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin clusterNode2.manager ! Down(clusterNode1.selfMember.address) // service1 removed regProbe2.awaitAssert({ - // we will also potentially get an update that the service was unreachable before the expected one - regProbe2.expectMessage(10.seconds, Listing(PingKey, Set(service2))) - }, 10.seconds) + // we will also potentially get an update that the service was unreachable before the expected one + regProbe2.expectMessage(10.seconds, Listing(PingKey, Set(service2))) + }, 10.seconds) } finally { testKit1.shutdownTestKit() testKit2.shutdownTestKit() @@ -317,9 +317,9 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin clusterNode1.manager ! Down(clusterNode2.selfMember.address) regProbe1.awaitAssert({ - // we will also potentially get an update that the service was unreachable before the expected one - regProbe1.expectMessage(10.seconds, Listing(PingKey, Set.empty[ActorRef[PingProtocol]])) - }, 10.seconds) + // we will also potentially get an update that the service was unreachable before the expected one + regProbe1.expectMessage(10.seconds, Listing(PingKey, Set.empty[ActorRef[PingProtocol]])) + }, 10.seconds) } finally { testKit1.shutdownTestKit() testKit2.shutdownTestKit() @@ -390,11 +390,10 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin // make sure it joined fine and node1 has upped it regProbe1.awaitAssert( { - clusterNode1.state.members.exists( - m => - m.uniqueAddress == clusterNode3.selfMember.uniqueAddress && - m.status == MemberStatus.Up && - !clusterNode1.state.unreachable(m)) should ===(true) + clusterNode1.state.members.exists(m => + m.uniqueAddress == clusterNode3.selfMember.uniqueAddress && + m.status == MemberStatus.Up && + !clusterNode1.state.unreachable(m)) should ===(true) }, 10.seconds) @@ -558,11 +557,11 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin // one actor on each node up front val actor1 = testKit1.spawn(Behaviors.receive[AnyRef] { - case (ctx, "stop") => - ctx.log.info("Stopping") - Behaviors.stopped - case _ => Behaviors.same - }, "actor1") + case (ctx, "stop") => + ctx.log.info("Stopping") + Behaviors.stopped + case _ => Behaviors.same + }, "actor1") val actor2 = testKit2.spawn(Behaviors.empty[AnyRef], "actor2") system1.receptionist ! 
Register(TheKey, actor1) @@ -723,7 +722,8 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin } - "handle concurrent unregistration and registration on different nodes".taggedAs(LongRunningTest, GHExcludeAeronTest) in { + "handle concurrent unregistration and registration on different nodes".taggedAs(LongRunningTest, + GHExcludeAeronTest) in { // this covers the fact that with ddata a removal can be lost val testKit1 = ActorTestKit("ClusterReceptionistSpec-test-12", ClusterReceptionistSpec.config) val system1 = testKit1.system diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala index d5f304e03a..2945ad8006 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala @@ -46,16 +46,16 @@ object ReplicatorDocSpec { def apply(key: GCounterKey): Behavior[Command] = Behaviors.setup[Command] { context => - //#selfUniqueAddress + // #selfUniqueAddress implicit val node: SelfUniqueAddress = DistributedData(context.system).selfUniqueAddress - //#selfUniqueAddress + // #selfUniqueAddress // adapter that turns the response messages from the replicator into our own protocol DistributedData.withReplicatorMessageAdapter[Command, GCounter] { replicatorAdapter => - //#subscribe + // #subscribe // Subscribe to changes of the given `key`. replicatorAdapter.subscribe(key, InternalSubscribeResponse.apply) - //#subscribe + // #subscribe def updated(cachedValue: Int): Behavior[Command] = { Behaviors.receiveMessage[Command] { diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala index cb5f2a822b..512d0c28d3 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala @@ -58,7 +58,7 @@ akka { def illustrateJoinSeedNodes(): Unit = { val system: ActorSystem[_] = ??? - //#join-seed-nodes + // #join-seed-nodes import akka.actor.Address import akka.actor.AddressFromURIString import akka.cluster.typed.JoinSeedNodes @@ -66,7 +66,7 @@ akka { val seedNodes: List[Address] = List("akka://ClusterSystem@127.0.0.1:2551", "akka://ClusterSystem@127.0.0.1:2552").map(AddressFromURIString.parse) Cluster(system).manager ! JoinSeedNodes(seedNodes) - //#join-seed-nodes + // #join-seed-nodes } object Backend { @@ -80,21 +80,21 @@ akka { def illustrateRoles(): Unit = { val context: ActorContext[_] = ??? - //#hasRole + // #hasRole val selfMember = Cluster(context.system).selfMember if (selfMember.hasRole("backend")) { context.spawn(Backend(), "back") } else if (selfMember.hasRole("frontend")) { context.spawn(Frontend(), "front") } - //#hasRole + // #hasRole } @nowarn("msg=never used") def illustrateDcAccess(): Unit = { val system: ActorSystem[_] = ??? 
- //#dcAccess + // #dcAccess val cluster = Cluster(system) // this node's data center val dc = cluster.selfMember.dataCenter @@ -103,7 +103,7 @@ akka { // a specific member's data center val aMember = cluster.state.members.head val aDc = aMember.dataCenter - //#dcAccess + // #dcAccess } } @@ -179,14 +179,14 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual val system2 = ActorSystem[Nothing](Behaviors.empty[Nothing], "ClusterSystem", noPort.withFallback(clusterConfig)) try { - //#cluster-create + // #cluster-create val cluster = Cluster(system) - //#cluster-create + // #cluster-create val cluster2 = Cluster(system2) - //#cluster-join + // #cluster-join cluster.manager ! Join(cluster.selfMember.address) - //#cluster-join + // #cluster-join cluster2.manager ! Join(cluster.selfMember.address) eventually { @@ -194,9 +194,9 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual cluster2.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up) } - //#cluster-leave + // #cluster-leave cluster2.manager ! Leave(cluster2.selfMember.address) - //#cluster-leave + // #cluster-leave eventually { cluster.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up) @@ -222,9 +222,9 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual val probe1 = TestProbe[MemberEvent]()(system1) val subscriber = probe1.ref - //#cluster-subscribe + // #cluster-subscribe cluster.subscriptions ! Subscribe(subscriber, classOf[MemberEvent]) - //#cluster-subscribe + // #cluster-subscribe cluster1.manager ! Join(cluster1.selfMember.address) eventually { @@ -257,10 +257,10 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual } val anotherMemberAddress = cluster2.selfMember.address - //#cluster-leave-example + // #cluster-leave-example cluster.manager ! 
Leave(anotherMemberAddress) // subscriber will receive events MemberLeft, MemberExited and MemberRemoved - //#cluster-leave-example + // #cluster-leave-example probe1.within(10.seconds) { probe1.expectMessageType[MemberLeft].member.address shouldEqual cluster2.selfMember.address probe1.expectMessageType[MemberExited].member.address shouldEqual cluster2.selfMember.address diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala index 0017e521ec..c474f6f809 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala @@ -83,7 +83,7 @@ object Publisher { object RegistrationService { def apply(): Behavior[AnyRef] = { - //#publisher + // #publisher Behaviors.setup[AnyRef] { context => import akka.cluster.pubsub.DistributedPubSub import akka.cluster.pubsub.DistributedPubSubMediator @@ -121,7 +121,7 @@ object Publisher { Behaviors.unhandled } } - //#publisher + // #publisher } } @@ -132,7 +132,7 @@ object Ingestion { import Ontology._ def apply(dt: DataType, mediator: akka.actor.ActorRef): Behavior[DataEvent] = { - //#destination + // #destination Behaviors.setup { context => // register to the path import akka.actor.typed.scaladsl.adapter._ @@ -140,7 +140,7 @@ object Ingestion { idle(dt, mediator) } - //#destination + // #destination } private def idle(dt: DataType, mediator: akka.actor.ActorRef): Behavior[DataEvent] = @@ -160,7 +160,7 @@ object Ingestion { /** Would normally be typed more specifically. */ private def active(key: DataKey, sink: Option[DataSink], mediator: akka.actor.ActorRef): Behavior[DataEvent] = - //#publisher + // #publisher Behaviors.setup { context => Behaviors.receiveMessagePartial[DataEvent] { case e: DataEnvelope if e.key == key => @@ -175,7 +175,7 @@ object Ingestion { Behaviors.stopped } } - //#publisher + // #publisher } @@ -184,7 +184,7 @@ object Subscriber { def apply(key: DataKey, mediator: akka.actor.ActorRef): Behavior[DataEvent] = { - //#subscriber + // #subscriber Behaviors.setup[DataEvent] { context => import akka.actor.typed.scaladsl.adapter._ @@ -200,7 +200,7 @@ object Subscriber { wonderland() case IngestionStarted(k, path) if k == key => - //#send + // #send // simulate data sent from various data sources: (1 to 100).foreach { n => mediator ! 
DistributedPubSubMediator.Send( @@ -208,12 +208,12 @@ object Subscriber { msg = DataEnvelope(key, s"hello-$key-$n"), localAffinity = true) } - //#send + // #send andThen(key, mediator) } } - //#subscriber + // #subscriber } private def wonderland(): Behavior[DataEvent] = { @@ -261,9 +261,9 @@ object DataPlatform { def apply(): Behavior[ProvisionCommand] = { Behaviors.setup { context => - //#mediator + // #mediator val mediator = DistributedPubSub(context.system).mediator - //#mediator + // #mediator val service = context.spawn(DataService(mediator), "data") Behaviors.receiveMessagePartial { diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala index e906d72087..6cb278099d 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala @@ -13,7 +13,7 @@ import akka.actor.typed.scaladsl.Behaviors //#import object PingPongExample { - //#ping-service + // #ping-service object PingService { val PingServiceKey = ServiceKey[Ping]("pingService") @@ -33,9 +33,9 @@ object PingPongExample { } } } - //#ping-service + // #ping-service - //#pinger + // #pinger object Pinger { def apply(pingService: ActorRef[PingService.Ping]): Behavior[PingService.Pong.type] = { Behaviors.setup { context => @@ -48,9 +48,9 @@ object PingPongExample { } } } - //#pinger + // #pinger - //#pinger-guardian + // #pinger-guardian object Guardian { def apply(): Behavior[Nothing] = { Behaviors @@ -67,9 +67,9 @@ object PingPongExample { .narrow } } - //#pinger-guardian + // #pinger-guardian - //#find + // #find object PingManager { sealed trait Command case object PingAll extends Command @@ -92,12 +92,12 @@ object PingPongExample { } } } - //#find + // #find Behaviors.setup[PingService.Ping] { context => - //#deregister + // #deregister context.system.receptionist ! Receptionist.Deregister(PingService.PingServiceKey, context.self) - //#deregister + // #deregister Behaviors.empty } } diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala index 467d734ce0..1a6eefc910 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala @@ -14,7 +14,7 @@ object SingletonCompileOnlySpec { val system = ActorSystem(Behaviors.empty, "Singleton") - //#counter + // #counter object Counter { sealed trait Command case object Increment extends Command @@ -38,9 +38,9 @@ object SingletonCompileOnlySpec { updated(0) } } - //#counter + // #counter - //#singleton + // #singleton import akka.cluster.typed.ClusterSingleton import akka.cluster.typed.SingletonActor @@ -50,24 +50,24 @@ object SingletonCompileOnlySpec { SingletonActor(Behaviors.supervise(Counter()).onFailure[Exception](SupervisorStrategy.restart), "GlobalCounter")) proxy ! 
Counter.Increment - //#singleton + // #singleton - //#stop-message + // #stop-message val singletonActor = SingletonActor(Counter(), "GlobalCounter").withStopMessage(Counter.GoodByeCounter) singletonManager.init(singletonActor) - //#stop-message + // #stop-message - //#backoff + // #backoff val proxyBackOff: ActorRef[Counter.Command] = singletonManager.init( SingletonActor( Behaviors .supervise(Counter()) .onFailure[Exception](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 0.2)), "GlobalCounter")) - //#backoff + // #backoff - //#create-singleton-proxy-dc + // #create-singleton-proxy-dc val singletonProxy: ActorRef[Counter.Command] = ClusterSingleton(system).init( SingletonActor(Counter(), "GlobalCounter").withSettings(ClusterSingletonSettings(system).withDataCenter("dc2"))) - //#create-singleton-proxy-dc + // #create-singleton-proxy-dc } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index fb9484d550..f29e36a447 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -136,8 +136,8 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { private def checkAutoDownUsage(): Unit = { if (settings.DowningProviderClassName == "akka.cluster.AutoDowning" || - (settings.config.hasPath("auto-down-unreachable-after") && settings.config.getString( - "auto-down-unreachable-after") != "off")) + (settings.config.hasPath("auto-down-unreachable-after") && settings.config.getString( + "auto-down-unreachable-after") != "off")) logWarning( "auto-down has been removed in Akka 2.6.0. See " + "https://doc.akka.io/docs/akka/2.6/typed/cluster.html#downing for alternatives.") diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala index 09bb3958e4..6222280b01 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala @@ -90,7 +90,7 @@ private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: Dynami // user has not specified nr-of-instances val config2 = if (config.hasPath("cluster.enabled") && config.getBoolean("cluster.enabled") && !config.hasPath( - "nr-of-instances")) { + "nr-of-instances")) { val maxTotalNrOfInstances = config.withFallback(default).getInt("cluster.max-total-nr-of-instances") ConfigFactory.parseString("nr-of-instances=" + maxTotalNrOfInstances).withFallback(config) } else config diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 2d4f0e00a8..ce409a8142 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -791,9 +791,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) val newMembers = localMembers + Member(joiningNode, roles, appVersion) + Member( - selfUniqueAddress, - cluster.selfRoles, - cluster.settings.AppVersion) + selfUniqueAddress, + cluster.selfRoles, + cluster.settings.AppVersion) val newGossip = latestGossip.copy(members = newMembers) updateLatestGossip(newGossip) @@ -1048,14 +1048,15 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh 
*/ def receiveGossip(envelope: GossipEnvelope): ReceiveGossipType = { val from = envelope.from - val remoteGossip = try { - envelope.gossip - } catch { - case NonFatal(t) => - gossipLogger.logWarning("Invalid Gossip. This should only happen during a rolling upgrade. {}", t.getMessage) - Gossip.empty + val remoteGossip = + try { + envelope.gossip + } catch { + case NonFatal(t) => + gossipLogger.logWarning("Invalid Gossip. This should only happen during a rolling upgrade. {}", t.getMessage) + Gossip.empty - } + } val localGossip = latestGossip if (remoteGossip eq Gossip.empty) { @@ -1274,8 +1275,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } def checkForPrepareForShutdown(): Unit = { - if (MembershipState.allowedToPrepareToShutdown(latestGossip.member(selfUniqueAddress).status) && latestGossip.members - .exists(m => MembershipState.prepareForShutdownStates(m.status))) { + if (MembershipState.allowedToPrepareToShutdown( + latestGossip.member(selfUniqueAddress).status) && latestGossip.members + .exists(m => MembershipState.prepareForShutdownStates(m.status))) { logDebug("Detected full cluster shutdown") self ! ClusterUserAction.PrepareForShutdown } @@ -1287,8 +1289,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // status Down. The down commands should spread before we shutdown. val unreachable = membershipState.dcReachability.allUnreachableOrTerminated val downed = membershipState.dcMembers.collect { case m if m.status == Down => m.uniqueAddress } - if (selfDownCounter >= MaxTicksBeforeShuttingDownMyself || downed.forall( - node => unreachable(node) || latestGossip.seenByNode(node))) { + if (selfDownCounter >= MaxTicksBeforeShuttingDownMyself || downed.forall(node => + unreachable(node) || latestGossip.seenByNode(node))) { // the reason for not shutting down immediately is to give the gossip a chance to spread // the downing information to other downed nodes, so that they can shutdown themselves logInfo("Node has been marked as DOWN. 
Shutting down myself") @@ -1377,7 +1379,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh val updatedGossip: Gossip = if (removedUnreachable.nonEmpty || removedExitingConfirmed.nonEmpty || changedMembers.nonEmpty || - removedOtherDc.nonEmpty) { + removedOtherDc.nonEmpty) { // replace changed members val removed = removedUnreachable @@ -1557,10 +1559,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // needed for tests def sendGossipTo(address: Address): Unit = { - latestGossip.members.foreach( - m => - if (m.address == address) - gossipTo(m.uniqueAddress)) + latestGossip.members.foreach(m => + if (m.address == address) + gossipTo(m.uniqueAddress)) } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 7910df4d92..5a15203c88 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -67,11 +67,7 @@ object ClusterEvent { // for binary compatibility (used to be a case class) object CurrentClusterState extends AbstractFunction5[ - immutable.SortedSet[Member], - Set[Member], - Set[Address], - Option[Address], - Map[String, Option[Address]], + immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]], CurrentClusterState] { @nowarn("msg=deprecated") @@ -105,11 +101,7 @@ object ClusterEvent { val unreachableDataCenters: Set[DataCenter], @InternalApi private[akka] val memberTombstones: Set[UniqueAddress]) extends Product5[ - immutable.SortedSet[Member], - Set[Member], - Set[Address], - Option[Address], - Map[String, Option[Address]]] + immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]]] with Serializable { // for binary compatibility @@ -488,7 +480,8 @@ object ClusterEvent { oldState.dcReachabilityNoOutsideNodes.allUnreachable.iterator .collect { case node - if newGossip.hasMember(node) && newState.dcReachabilityNoOutsideNodes.isReachable(node) && node != newState.selfUniqueAddress => + if newGossip.hasMember(node) && newState.dcReachabilityNoOutsideNodes.isReachable( + node) && node != newState.selfUniqueAddress => ReachableMember(newGossip.member(node)) } .to(immutable.IndexedSeq) @@ -517,7 +510,7 @@ object ClusterEvent { if (newState eq oldState) Nil else { val otherDcs = (oldState.latestGossip.allDataCenters - .union(newState.latestGossip.allDataCenters)) - newState.selfDc + .union(newState.latestGossip.allDataCenters)) - newState.selfDc val oldUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(oldState)) val currentUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(newState)) @@ -536,7 +529,7 @@ object ClusterEvent { if (newState eq oldState) Nil else { val otherDcs = (oldState.latestGossip.allDataCenters - .union(newState.latestGossip.allDataCenters)) - newState.selfDc + .union(newState.latestGossip.allDataCenters)) - newState.selfDc val oldUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(oldState)) val currentUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(newState)) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index 8bfb629b1f..0082a0547e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -178,16 +178,16 @@ private[cluster] 
class ClusterHeartbeatSender extends Actor { } def init(snapshot: CurrentClusterState): Unit = { - val nodes = snapshot.members.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } + val nodes = snapshot.members.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } val unreachable = snapshot.unreachable.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } state = state.init(nodes, unreachable) } def addMember(m: Member): Unit = if (m.uniqueAddress != selfUniqueAddress && // is not self - !state.contains(m.uniqueAddress) && // not already added - filterInternalClusterMembers(m) // should be watching members from this DC (internal / external) - ) { + !state.contains(m.uniqueAddress) && // not already added + filterInternalClusterMembers(m) // should be watching members from this DC (internal / external) + ) { state = state.addMember(m.uniqueAddress) } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala index e8f673171f..59370329dd 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala @@ -162,7 +162,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { s"""{ | "address": "${m.address}", | "roles": [${if (m.roles.isEmpty) "" - else m.roles.toList.sorted.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], + else m.roles.toList.sorted.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], | "status": "${m.status}", | "app-version": "${m.appVersion}" | }""".stripMargin @@ -177,7 +177,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { s"""{ | "node": "${subject.address}", | "observed-by": [${if (observerAddresses.isEmpty) "" - else observerAddresses.mkString("\n ", ",\n ", "\n ")}] + else observerAddresses.mkString("\n ", ",\n ", "\n ")}] | }""".stripMargin } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index fee027306b..52fe787c49 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -59,94 +59,96 @@ import akka.dispatch.UnboundedMessageQueueSemantics private val eventBusListener: ActorRef = { cluster.system .systemActorOf(Props(new Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { - override def preStart(): Unit = cluster.subscribe(this.self, classOf[ClusterDomainEvent]) + override def preStart(): Unit = cluster.subscribe(this.self, classOf[ClusterDomainEvent]) - // make sure that final state has member status Removed - override def postStop(): Unit = { - selfRemoved() // make sure it ends as Removed even though MemberRemoved message didn't make it - } + // make sure that final state has member status Removed + override def postStop(): Unit = { + selfRemoved() // make sure it ends as Removed even though MemberRemoved message didn't make it + } - private def selfRemoved(): Unit = { - val oldState = _state.get() - // keepig latestStats, but otherwise clear everything - val newState = oldState.copy( - clusterState = CurrentClusterState(), - reachability = Reachability.empty, - selfMember = oldState.selfMember.copy(MemberStatus.Removed)) - _state.set(newState) - } - - def receive: Receive = { - case e: ClusterDomainEvent => + private def selfRemoved(): Unit = { val oldState = _state.get() - val oldClusterState = 
oldState.clusterState - e match { - case SeenChanged(_, seenBy) => - _state.set(oldState.copy(clusterState = oldClusterState.copy(seenBy = seenBy))) - case ReachabilityChanged(reachability) => - _state.set(oldState.copy(reachability = reachability)) - case MemberRemoved(member, _) if member.address == selfAddress => - selfRemoved() - case MemberRemoved(member, _) => - _state.set( - oldState.copy( - clusterState = oldClusterState.copy( - members = oldClusterState.members - member, - unreachable = oldClusterState.unreachable - member))) - case UnreachableMember(member) => - // replace current member with new member (might have different status, only address is used in equals) - _state.set( - oldState.copy( - clusterState = oldClusterState.copy(unreachable = oldClusterState.unreachable - member + member))) - case ReachableMember(member) => - _state.set( - oldState.copy( - clusterState = oldClusterState.copy(unreachable = oldClusterState.unreachable - member))) - case event: MemberEvent => - val member = event.member - // replace current member with new member (might have different status, only address is used in equals) - val newUnreachable = - if (oldClusterState.unreachable.contains(member)) oldClusterState.unreachable - member + member - else oldClusterState.unreachable - val newSelfMember = if (member.address == selfAddress) member else oldState.selfMember - _state.set( - oldState.copy( - clusterState = oldClusterState - .copy(members = oldClusterState.members - member + member, unreachable = newUnreachable), - selfMember = newSelfMember)) - case LeaderChanged(leader) => - _state.set(oldState.copy(clusterState = oldClusterState.copy(leader = leader))) - case RoleLeaderChanged(role, leader) => - _state.set( - oldState.copy(clusterState = - oldClusterState.copy(roleLeaderMap = oldClusterState.roleLeaderMap + (role -> leader)))) - case stats: CurrentInternalStats => - _state.set(oldState.copy(latestStats = stats)) - case ClusterShuttingDown => - case r: ReachableDataCenter => - _state.set( - oldState.copy(clusterState = - oldClusterState.withUnreachableDataCenters(oldClusterState.unreachableDataCenters - r.dataCenter))) - case r: UnreachableDataCenter => - _state.set( - oldState.copy(clusterState = - oldClusterState.withUnreachableDataCenters(oldClusterState.unreachableDataCenters + r.dataCenter))) - case MemberTombstonesChanged(tombstones) => - _state.set(oldState.copy(clusterState = oldClusterState.withMemberTombstones(tombstones))) - case unexpected => - throw new IllegalArgumentException(s"Unexpected cluster event type ${unexpected.getClass}") // compiler exhaustiveness check pleaser - } + // keeping latestStats, but otherwise clear everything + val newState = oldState.copy( + clusterState = CurrentClusterState(), + reachability = Reachability.empty, + selfMember = oldState.selfMember.copy(MemberStatus.Removed)) + _state.set(newState) + } - // once captured, optional verbose logging of event - logInfoVerbose(e) + def receive: Receive = { + case e: ClusterDomainEvent => + val oldState = _state.get() + val oldClusterState = oldState.clusterState + e match { + case SeenChanged(_, seenBy) => + _state.set(oldState.copy(clusterState = oldClusterState.copy(seenBy = seenBy))) + case ReachabilityChanged(reachability) => + _state.set(oldState.copy(reachability = reachability)) + case MemberRemoved(member, _) if member.address == selfAddress => + selfRemoved() + case MemberRemoved(member, _) => + _state.set( + oldState.copy( + clusterState = oldClusterState.copy( + members = oldClusterState.members
- member, + unreachable = oldClusterState.unreachable - member))) + case UnreachableMember(member) => + // replace current member with new member (might have different status, only address is used in equals) + _state.set( + oldState.copy( + clusterState = oldClusterState.copy(unreachable = oldClusterState.unreachable - member + member))) + case ReachableMember(member) => + _state.set( + oldState.copy( + clusterState = oldClusterState.copy(unreachable = oldClusterState.unreachable - member))) + case event: MemberEvent => + val member = event.member + // replace current member with new member (might have different status, only address is used in equals) + val newUnreachable = + if (oldClusterState.unreachable.contains(member)) oldClusterState.unreachable - member + member + else oldClusterState.unreachable + val newSelfMember = if (member.address == selfAddress) member else oldState.selfMember + _state.set( + oldState.copy( + clusterState = oldClusterState + .copy(members = oldClusterState.members - member + member, unreachable = newUnreachable), + selfMember = newSelfMember)) + case LeaderChanged(leader) => + _state.set(oldState.copy(clusterState = oldClusterState.copy(leader = leader))) + case RoleLeaderChanged(role, leader) => + _state.set( + oldState.copy(clusterState = + oldClusterState.copy(roleLeaderMap = oldClusterState.roleLeaderMap + (role -> leader)))) + case stats: CurrentInternalStats => + _state.set(oldState.copy(latestStats = stats)) + case ClusterShuttingDown => + case r: ReachableDataCenter => + _state.set( + oldState.copy(clusterState = + oldClusterState.withUnreachableDataCenters( + oldClusterState.unreachableDataCenters - r.dataCenter))) + case r: UnreachableDataCenter => + _state.set( + oldState.copy(clusterState = + oldClusterState.withUnreachableDataCenters( + oldClusterState.unreachableDataCenters + r.dataCenter))) + case MemberTombstonesChanged(tombstones) => + _state.set(oldState.copy(clusterState = oldClusterState.withMemberTombstones(tombstones))) + case unexpected => + throw new IllegalArgumentException(s"Unexpected cluster event type ${unexpected.getClass}") // compiler exhaustiveness check pleaser + } - case s: CurrentClusterState => - val oldState = _state.get() - val newSelfMember = - s.members.find(_.uniqueAddress == cluster.selfUniqueAddress).getOrElse(oldState.selfMember) - _state.set(oldState.copy(clusterState = s, selfMember = newSelfMember)) - } - }).withDispatcher(cluster.settings.UseDispatcher).withDeploy(Deploy.local), name = "clusterEventBusListener") + // once captured, optional verbose logging of event + logInfoVerbose(e) + + case s: CurrentClusterState => + val oldState = _state.get() + val newSelfMember = + s.members.find(_.uniqueAddress == cluster.selfUniqueAddress).getOrElse(oldState.selfMember) + _state.set(oldState.copy(clusterState = s, selfMember = newSelfMember)) + } + }).withDispatcher(cluster.settings.UseDispatcher).withDeploy(Deploy.local), name = "clusterEventBusListener") } def state: CurrentClusterState = _state.get().clusterState diff --git a/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala b/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala index 599005d1ac..f04a4a6928 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala @@ -46,8 +46,8 @@ private[akka] class CoordinatedShutdownLeave extends Actor { // not joined yet done(replyTo) } else if (s.members.exists(m => - 
m.uniqueAddress == cluster.selfUniqueAddress && - (m.status == Leaving || m.status == Exiting || m.status == Down))) { + m.uniqueAddress == cluster.selfUniqueAddress && + (m.status == Leaving || m.status == Exiting || m.status == Down))) { done(replyTo) } case MemberLeft(m) => diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index 808c1c9ade..b29cd056c2 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -336,21 +336,22 @@ private[cluster] object CrossDcHeartbeatingState { crossDcFailureDetector: FailureDetectorRegistry[Address], nrOfMonitoredNodesPerDc: Int, members: immutable.SortedSet[Member]): CrossDcHeartbeatingState = { - new CrossDcHeartbeatingState(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodesPerDc, state = { - // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc - val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter) + new CrossDcHeartbeatingState(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodesPerDc, + state = { + // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc + val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter) - if (members.ordering == Member.ageOrdering) { - // we already have the right ordering - groupedByDc - } else { - // we need to enforce the ageOrdering for the SortedSet in each DC - groupedByDc.map { - case (dc, ms) => - dc -> immutable.SortedSet.empty[Member](Member.ageOrdering).union(ms) + if (members.ordering == Member.ageOrdering) { + // we already have the right ordering + groupedByDc + } else { + // we need to enforce the ageOrdering for the SortedSet in each DC + groupedByDc.map { + case (dc, ms) => + dc -> immutable.SortedSet.empty[Member](Member.ageOrdering).union(ms) + } } - } - }) + }) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala index 11abf0a452..0ca9727afd 100644 --- a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala @@ -36,8 +36,8 @@ final class JoinConfigCompatCheckCluster extends JoinConfigCompatChecker { val actualDowningProvider = actualConfig.getString(DowningProviderPath) val downingProviderResult = if (toCheckDowningProvider == actualDowningProvider || Set(toCheckDowningProvider, actualDowningProvider) == Set( - AkkaSbrProviderClass, - LightbendSbrProviderClass)) + AkkaSbrProviderClass, + LightbendSbrProviderClass)) Valid else JoinConfigCompatChecker.checkEquality(List(DowningProviderPath), toCheck, actualConfig) diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index db186f98fb..601d2949ba 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -43,7 +43,7 @@ class Member private[cluster] ( } override def toString: String = { s"Member($address, $status${if (dataCenter == ClusterSettings.DefaultDataCenter) "" else s", $dataCenter"}${if (appVersion == Version.Zero) "" - else s", $appVersion"})" + else s", $appVersion"})" } def hasRole(role: String): Boolean = roles.contains(role) diff --git 
a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala index d7901bdca7..195591c345 100644 --- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -69,11 +69,10 @@ import akka.util.ccompat._ // only assigned once. def memberHinderingConvergenceExists = { val memberStatus = if (firstMemberInDc) convergenceMemberStatus + Joining + WeaklyUp else convergenceMemberStatus - members.exists( - member => - (firstMemberInDc || member.dataCenter == selfDc) && - memberStatus(member.status) && - !(latestGossip.seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) + members.exists(member => + (firstMemberInDc || member.dataCenter == selfDc) && + memberStatus(member.status) && + !(latestGossip.seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) } // Find cluster members in the data center that are unreachable from other members of the data center @@ -174,11 +173,10 @@ import akka.util.ccompat._ val reachableMembersInDc = if (reachability.isAllReachable) mbrs.filter(m => m.dataCenter == selfDc && m.status != Down) else - mbrs.filter( - m => - m.dataCenter == selfDc && - m.status != Down && - (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) + mbrs.filter(m => + m.dataCenter == selfDc && + m.status != Down && + (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) if (reachableMembersInDc.isEmpty) None else reachableMembersInDc diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index 60254f0838..5b2455ba20 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -147,8 +147,8 @@ private[cluster] class Reachability private ( this else { if (status == Reachable && oldObserverRows.forall { - case (_, r) => r.status == Reachable || r.subject == subject - }) { + case (_, r) => r.status == Reachable || r.subject == subject + }) { // all Reachable, prune by removing the records of the observer, and bump the version new Reachability(records.filterNot(_.observer == observer), newVersions) } else { diff --git a/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala b/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala index 22c15ad919..7fb489e8c1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala +++ b/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala @@ -280,7 +280,6 @@ private[cluster] final class JoinSeedNodeProcess( receiveInitJoinAckIncompatibleConfig(joinTo = address, origin = sender(), behavior = Some(done)) case InitJoinNack(_) => // that seed was uninitialized - case ReceiveTimeout => if (attempt >= 2) logWarning( diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index 3b1784b3fe..e4a96f8f53 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -358,13 +358,14 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) private def uniqueAddressFromProto(uniqueAddress: cm.UniqueAddress): UniqueAddress = { - 
UniqueAddress(addressFromProto(uniqueAddress.getAddress), if (uniqueAddress.hasUid2) { - // new remote node join the two parts of the long uid back - (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0XFFFFFFFFL) - } else { - // old remote node - uniqueAddress.getUid.toLong - }) + UniqueAddress(addressFromProto(uniqueAddress.getAddress), + if (uniqueAddress.hasUid2) { + // new remote node, join the two parts of the long uid back + (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0xFFFFFFFFL) + } else { + // old remote node + uniqueAddress.getUid.toLong + }) } private val memberStatusToInt = scala.collection.immutable.HashMap[MemberStatus, Int]( @@ -469,13 +470,12 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) case (observer, version) => val subjectReachability = reachability .recordsFrom(observer) - .map( - r => - cm.SubjectReachability - .newBuilder() - .setAddressIndex(mapUniqueAddress(r.subject)) - .setStatus(cm.ReachabilityStatus.forNumber(reachabilityStatusToInt(r.status))) - .setVersion(r.version)) + .map(r => + cm.SubjectReachability + .newBuilder() + .setAddressIndex(mapUniqueAddress(r.subject)) + .setStatus(cm.ReachabilityStatus.forNumber(reachabilityStatusToInt(r.status))) + .setVersion(r.version)) cm.ObserverReachability .newBuilder() .setAddressIndex(mapUniqueAddress(observer)) diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index f865dca5bb..25d23a200c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -55,7 +55,7 @@ object ClusterRouterGroupSettings { routeesPaths = immutableSeq(config.getStringList("routees.paths")), allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption( - config.getString("cluster.use-role"))) + config.getString("cluster.use-role"))) def apply( totalInstances: Int, @@ -207,7 +207,7 @@ object ClusterRouterPoolSettings { maxInstancesPerNode = config.getInt("cluster.max-nr-of-instances-per-node"), allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption( - config.getString("cluster.use-role"))) + config.getString("cluster.use-role"))) def unapply(settings: ClusterRouterPoolSettings): Option[(Int, Int, Boolean, Set[String])] = Some((settings.totalInstances, settings.maxInstancesPerNode, settings.allowLocalRoutees, settings.useRoles)) diff --git a/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala b/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala index e56425197f..2b987bcc9f 100644 --- a/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala +++ b/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala @@ -103,13 +103,12 @@ import akka.coordination.lease.scaladsl.Lease * changed to Exiting on the other side of the partition.
*/ def members(includingPossiblyUp: Boolean, excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] = - _allMembers.filterNot( - m => - (!includingPossiblyUp && m.status == MemberStatus.Joining) || - (!includingPossiblyUp && m.status == MemberStatus.WeaklyUp) || - (excludingPossiblyExiting && m.status == MemberStatus.Leaving) || - m.status == MemberStatus.Down || - m.status == MemberStatus.Exiting) + _allMembers.filterNot(m => + (!includingPossiblyUp && m.status == MemberStatus.Joining) || + (!includingPossiblyUp && m.status == MemberStatus.WeaklyUp) || + (excludingPossiblyExiting && m.status == MemberStatus.Leaving) || + m.status == MemberStatus.Down || + m.status == MemberStatus.Exiting) def membersWithRole: immutable.SortedSet[Member] = membersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = false) @@ -209,10 +208,9 @@ import akka.coordination.lease.scaladsl.Lease private[sbr] def setReachability(r: Reachability): Unit = { // skip records with Reachability.Reachable, and skip records related to other DC - _reachability = r.filterRecords( - record => - (record.status == Reachability.Unreachable || record.status == Reachability.Terminated) && - isInSelfDc(record.observer) && isInSelfDc(record.subject)) + _reachability = r.filterRecords(record => + (record.status == Reachability.Unreachable || record.status == Reachability.Terminated) && + isInSelfDc(record.observer) && isInSelfDc(record.subject)) } def seenBy: Set[Address] = diff --git a/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala b/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala index 7381acbda4..8797defea3 100644 --- a/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala +++ b/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala @@ -475,7 +475,7 @@ import akka.remote.artery.ThisActorSystemQuarantinedEvent log.warning( ClusterLogMarker.sbrDowning(decision), s"SBR took decision $decision and is downing [${nodesToDown.map(_.address).mkString(", ")}]${if (downMyself) " including myself," - else ""}, " + + else ""}, " + s"[${strategy.unreachable.size}] unreachable of [${strategy.members.size}] members" + indirectlyConnectedLogMessage + s", all members in DC [${strategy.allMembersInDC.mkString(", ")}], full reachability status: [${strategy.reachability}]" + diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala index 20f1a75151..eead5fbbdd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala @@ -87,19 +87,19 @@ abstract class ClusterDeathWatchSpec val path3 = RootActorPath(third) / "user" / "subject" val watchEstablished = TestLatch(2) system.actorOf(Props(new Actor { - context.actorSelection(path2) ! Identify(path2) - context.actorSelection(path3) ! Identify(path3) + context.actorSelection(path2) ! Identify(path2) + context.actorSelection(path3) ! Identify(path3) - def receive = { - case ActorIdentity(`path2`, Some(ref)) => - context.watch(ref) - watchEstablished.countDown() - case ActorIdentity(`path3`, Some(ref)) => - context.watch(ref) - watchEstablished.countDown() - case Terminated(actor) => testActor ! 
actor.path - } - }).withDeploy(Deploy.local), name = "observer1") + def receive = { + case ActorIdentity(`path2`, Some(ref)) => + context.watch(ref) + watchEstablished.countDown() + case ActorIdentity(`path3`, Some(ref)) => + context.watch(ref) + watchEstablished.countDown() + case Terminated(actor) => testActor ! actor.path + } + }).withDeploy(Deploy.local), name = "observer1") watchEstablished.await enterBarrier("watch-established") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala index 33f84e096b..957cb8dd53 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala @@ -42,10 +42,10 @@ abstract class ClusterShutdownSpec extends MultiNodeClusterSpec(ClusterShutdownS runOn(first, second, third) { awaitAssert({ - withClue("members: " + Cluster(system).readView.members) { - Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown - } - }, 10.seconds) + withClue("members: " + Cluster(system).readView.members) { + Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown + } + }, 10.seconds) } } "spread around the cluster" in { @@ -72,26 +72,27 @@ abstract class ClusterShutdownSpec extends MultiNodeClusterSpec(ClusterShutdownS runOn(first) { Cluster(system).leave(address(first)) } - awaitAssert({ - withClue("members: " + Cluster(system).readView.members) { - runOn(second, third) { - Cluster(system).readView.members.size shouldEqual 2 + awaitAssert( + { + withClue("members: " + Cluster(system).readView.members) { + runOn(second, third) { + Cluster(system).readView.members.size shouldEqual 2 + } + runOn(first) { + Cluster(system).selfMember.status shouldEqual Removed + } } - runOn(first) { - Cluster(system).selfMember.status shouldEqual Removed - } - } - }, 10.seconds) + }, 10.seconds) enterBarrier("first-gone") runOn(second) { Cluster(system).leave(address(second)) Cluster(system).leave(address(third)) } awaitAssert({ - withClue("self member: " + Cluster(system).selfMember) { - Cluster(system).selfMember.status shouldEqual Removed - } - }, 10.seconds) + withClue("self member: " + Cluster(system).selfMember) { + Cluster(system).selfMember.status shouldEqual Removed + } + }, 10.seconds) enterBarrier("all-gone") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala index 38221371b8..93be9ba0df 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala @@ -46,18 +46,18 @@ abstract class InitialHeartbeatSpec extends MultiNodeClusterSpec(InitialHeartbea runOn(first) { within(10 seconds) { awaitAssert({ - cluster.sendCurrentClusterState(testActor) - expectMsgType[CurrentClusterState].members.map(_.address) should contain(secondAddress) - }, interval = 50.millis) + cluster.sendCurrentClusterState(testActor) + expectMsgType[CurrentClusterState].members.map(_.address) should contain(secondAddress) + }, interval = 50.millis) } } runOn(second) { cluster.join(first) within(10 seconds) { awaitAssert({ - cluster.sendCurrentClusterState(testActor) - expectMsgType[CurrentClusterState].members.map(_.address) should contain(firstAddress) - }, interval = 50.millis) + cluster.sendCurrentClusterState(testActor) + 
expectMsgType[CurrentClusterState].members.map(_.address) should contain(firstAddress) + }, interval = 50.millis) } } enterBarrier("second-joined") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala index 0a618d0d78..8468fdcd83 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala @@ -78,9 +78,9 @@ abstract class InitialMembersOfNewDcSpec "see all dc1 nodes see each other as up" in { runOn(two, three) { within(20.seconds) { - awaitAssert({ + awaitAssert { cluster.state.members.filter(_.status == MemberStatus.Up) should have size 3 - }) + } } } enterBarrier("dc1 fully up") @@ -97,9 +97,9 @@ abstract class InitialMembersOfNewDcSpec // Check how long it takes for all other nodes to see every node as up runOn(one, two, three, four) { within(20.seconds) { - awaitAssert({ + awaitAssert { cluster.state.members.filter(_.status == MemberStatus.Up) should have size 4 - }) + } } val totalTime = System.nanoTime() - startTime log.info("Can see new node (and all others as up): {}ms", totalTime.nanos.toMillis) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala index c74ba50e8e..41fc1ae760 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala @@ -160,7 +160,7 @@ abstract class LargeMessageClusterSpec // for non Aeron transport we use the Slow message and SlowSerializer to slow down // to not completely overload the machine/network, see issue #24576 val arterySettings = ArterySettings(system.settings.config.getConfig("akka.remote.artery")) - val aeronUdpEnabled = (arterySettings.Enabled && arterySettings.Transport == ArterySettings.AeronUpd) + val aeronUdpEnabled = arterySettings.Enabled && arterySettings.Transport == ArterySettings.AeronUpd runOn(second) { val largeEcho2 = identify(second, "largeEcho") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index f18d5d0d63..6db9921840 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -58,7 +58,7 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { val currentRoles = sortedRoles.drop(alreadyShutdown) - currentRoles.size should be >= (2) + currentRoles.size should be >= 2 val leader = currentRoles.head val aUser = currentRoles.last val remainingRoles = currentRoles.tail @@ -87,7 +87,7 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig // user marks the shutdown leader as DOWN cluster.down(leaderAddress) // removed - awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain (leaderAddress)) + awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain leaderAddress) enterBarrier("after-down" + n, "completed" + n) case _ if remainingRoles.contains(myself) => @@ -116,7 +116,8 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig enterBarrier("after-2") } - "be able to 're-elect' a single 
leader after leader has left (again)" taggedAs LongRunningTest in within(30 seconds) { + "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in within( + 30 seconds) { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) enterBarrier("after-3") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index 4c10ef5221..d5a4880891 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -62,15 +62,16 @@ abstract class LeaderLeavingSpec extends MultiNodeClusterSpec(LeaderLeavingMulti val exitingLatch = TestLatch() - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case state: CurrentClusterState => - if (state.members.exists(m => m.address == oldLeaderAddress && m.status == Exiting)) - exitingLatch.countDown() - case MemberExited(m) if m.address == oldLeaderAddress => exitingLatch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case state: CurrentClusterState => + if (state.members.exists(m => m.address == oldLeaderAddress && m.status == Exiting)) + exitingLatch.countDown() + case MemberExited(m) if m.address == oldLeaderAddress => exitingLatch.countDown() + case _ => // ignore + } + }).withDeploy(Deploy.local)), classOf[MemberEvent]) enterBarrier("registered-listener") enterBarrier("leader-left") @@ -82,13 +83,13 @@ abstract class LeaderLeavingSpec extends MultiNodeClusterSpec(LeaderLeavingMulti markNodeAsUnavailable(oldLeaderAddress) // verify that the LEADER is no longer part of the 'members' set - awaitAssert(clusterView.members.map(_.address) should not contain (oldLeaderAddress)) + awaitAssert(clusterView.members.map(_.address) should not contain oldLeaderAddress) // verify that the LEADER is not part of the 'unreachable' set - awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain (oldLeaderAddress)) + awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain oldLeaderAddress) // verify that we have a new LEADER - awaitAssert(clusterView.leader should not be (oldLeaderAddress)) + awaitAssert(clusterView.leader should not be oldLeaderAddress) } enterBarrier("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index fef9740e7f..b6566608dd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -36,17 +36,18 @@ abstract class MembershipChangeListenerUpSpec extends MultiNodeClusterSpec(Membe runOn(first, second) { val latch = TestLatch() val expectedAddresses = Set(first, second).map(address) - cluster.subscribe(system.actorOf(Props(new Actor { - var members = Set.empty[Member] - def receive = { - case state: CurrentClusterState => members = state.members - case MemberUp(m) => - members = members - m + m - if (members.map(_.address) == expectedAddresses) - latch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + var members = Set.empty[Member] + def receive = { + case state: CurrentClusterState => 
members = state.members + case MemberUp(m) => + members = members - m + m + if (members.map(_.address) == expectedAddresses) + latch.countDown() + case _ => // ignore + } + }).withDeploy(Deploy.local)), classOf[MemberEvent]) enterBarrier("listener-1-registered") cluster.join(first) latch.await @@ -63,17 +64,18 @@ abstract class MembershipChangeListenerUpSpec extends MultiNodeClusterSpec(Membe val latch = TestLatch() val expectedAddresses = Set(first, second, third).map(address) - cluster.subscribe(system.actorOf(Props(new Actor { - var members = Set.empty[Member] - def receive = { - case state: CurrentClusterState => members = state.members - case MemberUp(m) => - members = members - m + m - if (members.map(_.address) == expectedAddresses) - latch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + var members = Set.empty[Member] + def receive = { + case state: CurrentClusterState => members = state.members + case MemberUp(m) => + members = members - m + m + if (members.map(_.address) == expectedAddresses) + latch.countDown() + case _ => // ignore + } + }).withDeploy(Deploy.local)), classOf[MemberEvent]) enterBarrier("listener-2-registered") runOn(third) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala index 3d2b51b769..ab57f3b970 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -92,7 +92,8 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeClusterSpec(Multi expectedBetaHeartbeaterNodes = takeNOldestMembers(dataCenter = "beta", 2) expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) - expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles ++ expectedBetaHeartbeaterRoles) + expectedNoActiveHeartbeatSenderRoles = + roles.toSet -- (expectedAlphaHeartbeaterRoles ++ expectedBetaHeartbeaterRoles) } "collect information on oldest nodes" taggedAs LongRunningTest in { @@ -157,7 +158,7 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeClusterSpec(Multi implicit val sender: ActorRef = observer.ref val expectedAlphaMonitoringNodesAfterLeaving = - (takeNOldestMembers(dataCenter = "alpha", 3).filterNot(_.status == MemberStatus.Exiting)) + takeNOldestMembers(dataCenter = "alpha", 3).filterNot(_.status == MemberStatus.Exiting) runOn(membersAsRoles(expectedAlphaMonitoringNodesAfterLeaving).toList: _*) { awaitAssert( { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index e965191240..fbf97572ba 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -303,9 +303,9 @@ abstract class MultiDcSplitBrainSpec extends MultiNodeClusterSpec(MultiDcSplitBr } runOn(first, second, third) { - awaitAssert({ + awaitAssert { clusterView.members.map(_.address) should ===(Set(address(first), address(second), address(third))) - }) + } } runOn(remainingRoles: _*) { enterBarrier("restarted-fifth-removed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index 3bdd4e2829..062b0ccd8c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -82,7 +82,7 @@ abstract class MultiDcSunnyWeatherSpec extends MultiNodeClusterSpec(MultiDcSunny val expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) val expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles.union( - expectedBetaHeartbeaterRoles)) + expectedBetaHeartbeaterRoles)) enterBarrier("found-expectations") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 502c0de0ae..b593d74f29 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -245,7 +245,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) awaitCond( { if (memberInState(joinNode, List(MemberStatus.Up)) && - memberInState(myself, List(MemberStatus.Joining, MemberStatus.Up))) + memberInState(myself, List(MemberStatus.Joining, MemberStatus.Up))) true else { cluster.join(joinNode) @@ -290,7 +290,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) */ def assertLeaderIn(nodesInCluster: immutable.Seq[RoleName]): Unit = if (nodesInCluster.contains(myself)) { - nodesInCluster.length should not be (0) + nodesInCluster.length should not be 0 val expectedLeader = roleOfLeader(nodesInCluster) val leader = clusterView.leader val isLeader = leader == Some(clusterView.selfAddress) @@ -310,7 +310,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) timeout: FiniteDuration = 25.seconds): Unit = { within(timeout) { if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set - awaitAssert(canNotBePartOfMemberRing.foreach(a => clusterView.members.map(_.address) should not contain (a))) + awaitAssert(canNotBePartOfMemberRing.foreach(a => clusterView.members.map(_.address) should not contain a)) awaitAssert(clusterView.members.size should ===(numberOfMembers)) awaitAssert(clusterView.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))) // clusterView.leader is updated by LeaderChanged, await that to be updated also @@ -380,7 +380,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) * be determined from the `RoleName`. 
*/ def roleOfLeader(nodesInCluster: immutable.Seq[RoleName] = roles): RoleName = { - nodesInCluster.length should not be (0) + nodesInCluster.length should not be 0 nodesInCluster.sorted.head } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 11689d66e5..e475d433fd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -37,16 +37,16 @@ abstract class NodeLeavingAndExitingSpec extends MultiNodeClusterSpec(NodeLeavin runOn(first, third) { val secondAddess = address(second) val exitingLatch = TestLatch() - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case state: CurrentClusterState => - if (state.members.exists(m => m.address == secondAddess && m.status == Exiting)) - exitingLatch.countDown() - case MemberExited(m) if m.address == secondAddess => exitingLatch.countDown() - case _: MemberRemoved => // not tested here - - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case state: CurrentClusterState => + if (state.members.exists(m => m.address == secondAddess && m.status == Exiting)) + exitingLatch.countDown() + case MemberExited(m) if m.address == secondAddess => exitingLatch.countDown() + case _: MemberRemoved => // not tested here + } + }).withDeploy(Deploy.local)), classOf[MemberEvent]) enterBarrier("registered-listener") runOn(third) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index f657b51a24..b3548f6c59 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -52,12 +52,12 @@ abstract class NodeUpSpec extends MultiNodeClusterSpec(NodeUpMultiJvmSpec) { val unexpected = new AtomicReference[SortedSet[Member]](SortedSet.empty) cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case event: MemberEvent => - unexpected.set(unexpected.get + event.member) - case _: CurrentClusterState => // ignore - } - })), classOf[MemberEvent]) + def receive = { + case event: MemberEvent => + unexpected.set(unexpected.get + event.member) + case _: CurrentClusterState => // ignore + } + })), classOf[MemberEvent]) enterBarrier("listener-registered") runOn(second) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala index b6d409f923..3ef694b8c3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala @@ -79,12 +79,12 @@ abstract class RestartFirstSeedNodeSpec // we must transfer its address to seed2 and seed3 runOn(seed2, seed3) { system.actorOf(Props(new Actor { - def receive = { - case a: Address => - seedNode1Address = a - sender() ! "ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + def receive = { + case a: Address => + seedNode1Address = a + sender() ! 
"ok" + } + }).withDeploy(Deploy.local), name = "address-receiver") enterBarrier("seed1-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala index 09c265af14..55a37a943f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala @@ -81,12 +81,12 @@ abstract class RestartNode2SpecSpec extends MultiNodeClusterSpec(RestartNode2Spe // we must transfer its address to seed2 runOn(seed2) { system.actorOf(Props(new Actor { - def receive = { - case a: Address => - seedNode1Address = a - sender() ! "ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + def receive = { + case a: Address => + seedNode1Address = a + sender() ! "ok" + } + }).withDeploy(Deploy.local), name = "address-receiver") enterBarrier("seed1-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala index e7d11cd55f..15bb27f6bc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala @@ -84,12 +84,12 @@ abstract class RestartNode3Spec extends MultiNodeClusterSpec(RestartNode3MultiJv // we must transfer its address to first runOn(first, third) { system.actorOf(Props(new Actor { - def receive = { - case a: UniqueAddress => - secondUniqueAddress = a - sender() ! "ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + def receive = { + case a: UniqueAddress => + secondUniqueAddress = a + sender() ! "ok" + } + }).withDeploy(Deploy.local), name = "address-receiver") enterBarrier("second-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala index 307c828961..2c574c22bb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala @@ -102,12 +102,12 @@ abstract class RestartNodeSpec extends MultiNodeClusterSpec(RestartNodeMultiJvmS // we must transfer its address to first runOn(first, third) { system.actorOf(Props(new Actor { - def receive = { - case a: UniqueAddress => - secondUniqueAddress = a - sender() ! "ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + def receive = { + case a: UniqueAddress => + secondUniqueAddress = a + sender() ! 
"ok" + } + }).withDeploy(Deploy.local), name = "address-receiver") enterBarrier("second-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala index d8c321fa78..01706c9872 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala @@ -43,16 +43,18 @@ object SharedMediaDriverSupport { @tailrec def isDriverInactive(i: Int): Boolean = { if (i < 0) true else { - val active = try CommonContext.isDriverActive(new File(aeronDir), 5000, new Consumer[String] { - override def accept(msg: String): Unit = { - println(msg) + val active = + try CommonContext.isDriverActive(new File(aeronDir), 5000, + new Consumer[String] { + override def accept(msg: String): Unit = { + println(msg) + } + }) + catch { + case NonFatal(e) => + println("Exception checking isDriverActive: " + e.getMessage) + false } - }) - catch { - case NonFatal(e) => - println("Exception checking isDriverActive: " + e.getMessage) - false - } if (active) false else { Thread.sleep(500) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala index 54f876a283..47f91a4f71 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala @@ -255,8 +255,8 @@ abstract class StreamRefSpec extends MultiNodeClusterSpec(StreamRefSpec) with Im // and it triggered the subscription timeout. Therefore we must wait more than the // the subscription timeout for a failure val timeout = system.settings.config - .getDuration("akka.stream.materializer.stream-ref.subscription-timeout") - .asScala + 2.seconds + .getDuration("akka.stream.materializer.stream-ref.subscription-timeout") + .asScala + 2.seconds streamLifecycle3.expectMsg(timeout, "failed-system-42-tmp") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala index 0310e8054d..a7132781de 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala @@ -136,8 +136,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { val numberOfNodesJoiningToOneNode = getInt("nr-of-nodes-joining-to-one") * nFactor // remaining will join to seed nodes val numberOfNodesJoiningToSeedNodes = (totalNumberOfNodes - numberOfSeedNodes - - numberOfNodesJoiningToSeedNodesInitially - numberOfNodesJoiningOneByOneSmall - - numberOfNodesJoiningOneByOneLarge - numberOfNodesJoiningToOneNode) + numberOfNodesJoiningToSeedNodesInitially - numberOfNodesJoiningOneByOneSmall - + numberOfNodesJoiningOneByOneLarge - numberOfNodesJoiningToOneNode) .requiring(_ >= 0, s"too many configured nr-of-nodes-joining-*, total should be <= ${totalNumberOfNodes}") val numberOfNodesLeavingOneByOneSmall = getInt("nr-of-nodes-leaving-one-by-one-small") * nFactor val numberOfNodesLeavingOneByOneLarge = getInt("nr-of-nodes-leaving-one-by-one-large") * nFactor diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 4595585031..0b46859fb0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -66,13 +66,13 @@ abstract class SunnyWeatherSpec extends MultiNodeClusterSpec(SunnyWeatherMultiJv val unexpected = new AtomicReference[SortedSet[Member]](SortedSet.empty) cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case event: MemberEvent => - // we don't expected any changes to the cluster - unexpected.set(unexpected.get + event.member) - case _: CurrentClusterState => // ignore - } - })), classOf[MemberEvent]) + def receive = { + case event: MemberEvent => + // we don't expect any changes to the cluster + unexpected.set(unexpected.get + event.member) + case _: CurrentClusterState => // ignore + } + })), classOf[MemberEvent]) for (n <- 1 to 30) { enterBarrier("period-" + n) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala index 44f8d2f552..8f975818c0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala @@ -238,7 +238,7 @@ abstract class SurviveNetworkInstabilitySpec val joining = Vector(sixth, seventh) val others = Vector(second, third, fourth, fifth) runOn(first) { - for (role1 <- (joining :+ first); role2 <- others) { + for (role1 <- joining :+ first; role2 <- others) { testConductor.blackhole(role1, role2, Direction.Both).await } } @@ -265,7 +265,7 @@ enterBarrier("more-unreachable-5") runOn(first) { - for (role1 <- (joining :+ first); role2 <- others) { + for (role1 <- joining :+ first; role2 <- others) { testConductor.passThrough(role1, role2, Direction.Both).await } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala index e48f4663e8..dd72475c90 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala @@ -124,7 +124,8 @@ abstract class ClusterConsistentHashingRouterSpec val router2 = system.actorOf( ClusterRouterPool( local = ConsistentHashingPool(nrOfInstances = 0), - settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true)) + settings = + ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true)) .props(Props[Echo]()), "router2") // it may take some time until router receives cluster member events diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala index 7f02625519..4ec5427e6d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala @@ -65,9 +65,9 @@ abstract class UseRoleIgnoredSpec def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = { val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) - (receiveWhile(5 seconds, messages = expectedReplies) { + receiveWhile(5 seconds, messages = expectedReplies) { case Reply(`routeeType`, ref) => fullAddress(ref) - }).foldLeft(zero) { + }.foldLeft(zero) { case (replyMap, 
address) => replyMap + (address -> (replyMap(address) + 1)) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala index 75d15827a1..34e8b4fd55 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala @@ -54,7 +54,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { "be able to parse 'akka.actor.deployment._' with specified cluster pool" in { val service = "/user/service1" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) - deployment should not be (None) + deployment should not be None deployment should ===( Some(Deploy( @@ -71,7 +71,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { "be able to parse 'akka.actor.deployment._' with specified cluster group" in { val service = "/user/service2" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) - deployment should not be (None) + deployment should not be None deployment should ===( Some(Deploy( diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala index 1a71f48fde..e5133270a9 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala @@ -182,13 +182,13 @@ class ClusterHeartbeatSenderStateSpec extends AnyWordSpec with Matchers { val oldUnreachable = state.oldReceiversNowUnreachable state = state.removeMember(node) // keep unreachable, unless it was the removed - if (oldUnreachable(node))(oldUnreachable.diff(state.activeReceivers)) should ===(Set(node)) + if (oldUnreachable(node)) (oldUnreachable.diff(state.activeReceivers)) should ===(Set(node)) else (oldUnreachable.diff(state.activeReceivers)) should ===(Set.empty) state.failureDetector.isMonitoring(node.address) should ===(false) state.failureDetector.isAvailable(node.address) should ===(true) - state.activeReceivers should not contain (node) + state.activeReceivers should not contain node } case Unreachable => @@ -207,7 +207,7 @@ class ClusterHeartbeatSenderStateSpec extends AnyWordSpec with Matchers { state = state.heartbeatRsp(node) if (oldUnreachable(node)) - state.oldReceiversNowUnreachable should not contain (node) + state.oldReceiversNowUnreachable should not contain node if (oldUnreachable(node) && !oldRingReceivers(node)) state.failureDetector.isMonitoring(node.address) should ===(false) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 3847eaf6d4..98fee94168 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -67,8 +67,8 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { "register jmx mbean" in { val name = new ObjectName("akka:type=Cluster") val info = ManagementFactory.getPlatformMBeanServer.getMBeanInfo(name) - info.getAttributes.length should be > (0) - info.getOperations.length should be > (0) + info.getAttributes.length should be > 0 + info.getOperations.length should be > 0 } "reply with InitJoinNack for InitJoin before joining" in { @@ -356,13 +356,13 @@ class ClusterSpec 
extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val name1 = new ObjectName(s"akka:type=Cluster,port=2552") val info1 = ManagementFactory.getPlatformMBeanServer.getMBeanInfo(name1) - info1.getAttributes.length should be > (0) - info1.getOperations.length should be > (0) + info1.getAttributes.length should be > 0 + info1.getOperations.length should be > 0 val name2 = new ObjectName(s"akka:type=Cluster,port=2553") val info2 = ManagementFactory.getPlatformMBeanServer.getMBeanInfo(name2) - info2.getAttributes.length should be > (0) - info2.getOperations.length should be > (0) + info2.getAttributes.length should be > 0 + info2.getOperations.length should be > 0 } finally { shutdown(sys1) shutdown(sys2) diff --git a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala index 8c559bd82a..75f3f088b6 100644 --- a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala @@ -78,18 +78,19 @@ class DowningProviderSpec extends AnyWordSpec with Matchers { // race condition where the downing provider failure can be detected and trigger // graceful shutdown fast enough that creating the actor system throws on constructing // thread (or slow enough that we have time to try join the cluster before noticing) - val maybeSystem = try { - Some( - ActorSystem( - "auto-downing", - ConfigFactory.parseString(""" + val maybeSystem = + try { + Some( + ActorSystem( + "auto-downing", + ConfigFactory.parseString(""" akka.cluster.downing-provider-class="akka.cluster.FailingDowningProvider" """).withFallback(baseConf))) - } catch { - case NonFatal(_) => - // expected to sometimes happen - None - } + } catch { + case NonFatal(_) => + // expected to sometimes happen + None + } maybeSystem.foreach { system => val cluster = Cluster(system) diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 8d683af2b2..957dbafe12 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -96,7 +96,7 @@ class GossipSpec extends AnyWordSpec with Matchers { "not reach convergence when unreachable" in { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, a1.uniqueAddress) - val g1 = (Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1))) + val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1)) .seen(a1.uniqueAddress) .seen(b1.uniqueAddress) state(g1, b1).convergence(Set.empty) should ===(false) @@ -107,7 +107,7 @@ class GossipSpec extends AnyWordSpec with Matchers { "reach convergence when downed node has observed unreachable" in { // e3 is Down val r1 = Reachability.empty.unreachable(e3.uniqueAddress, a1.uniqueAddress) - val g1 = (Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1))) + val g1 = Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1)) .seen(a1.uniqueAddress) .seen(b1.uniqueAddress) .seen(e3.uniqueAddress) @@ -422,7 +422,7 @@ class GossipSpec extends AnyWordSpec with Matchers { .remove(dc2d1.uniqueAddress, System.currentTimeMillis()) gdc2.tombstones.keys should contain(dc2d1.uniqueAddress) - gdc2.members should not contain (dc2d1) + gdc2.members should not contain dc2d1 gdc2.overview.reachability.records.filter(r => r.subject == dc2d1.uniqueAddress || r.observer == 
dc2d1.uniqueAddress) should be(empty) gdc2.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) @@ -432,7 +432,7 @@ class GossipSpec extends AnyWordSpec with Matchers { merged1.members should ===(SortedSet(dc1a1, dc1b1, dc2c1)) merged1.tombstones.keys should contain(dc2d1.uniqueAddress) - merged1.members should not contain (dc2d1) + merged1.members should not contain dc2d1 merged1.overview.reachability.records.filter(r => r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) merged1.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) diff --git a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala index 3236cb9063..1199208cf8 100644 --- a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala @@ -29,7 +29,7 @@ class HeartbeatNodeRingSpec extends AnyWordSpec with Matchers { nodes.foreach { n => val receivers = ring.receivers(n) receivers.size should ===(3) - receivers should not contain (n) + receivers should not contain n } } diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala index c1ea4a552d..a255c6b750 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala @@ -62,15 +62,15 @@ class MemberOrderingSpec extends AnyWordSpec with Matchers { m1 should ===(m2) m1.hashCode should ===(m2.hashCode) - m3 should not be (m2) - m3 should not be (m1) + m3 should not be m2 + m3 should not be m1 m11 should ===(m22) m11.hashCode should ===(m22.hashCode) // different uid - m1 should not be (m11) - m2 should not be (m22) + m1 should not be m11 + m2 should not be m22 } "have consistent ordering and equals" in { diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala index c175e76a22..5ddf2408e3 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala @@ -35,11 +35,12 @@ class ClusterRouterSupervisorSpec extends AkkaSpec(""" "use provided supervisor strategy" in { val router = system.actorOf( ClusterRouterPool( - RoundRobinPool(nrOfInstances = 1, supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { - case _ => - testActor ! "supervised" - SupervisorStrategy.Stop - }), + RoundRobinPool(nrOfInstances = 1, + supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { + case _ => + testActor ! 
"supervised" + SupervisorStrategy.Stop + }), ClusterRouterPoolSettings(totalInstances = 1, maxInstancesPerNode = 1, allowLocalRoutees = true)) .props(Props(classOf[KillableActor])), name = "therouter") diff --git a/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala b/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala index 6c3a8428d6..3d4047057b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala @@ -119,11 +119,13 @@ class SplitBrainResolverSpec new LeaseSettings("akka-sbr", "test", new TimeoutSettings(1.second, 2.minutes, 3.seconds), ConfigFactory.empty) def createReachability(unreachability: Seq[(Member, Member)]): Reachability = { - Reachability(unreachability.map { - case (from, to) => Reachability.Record(from.uniqueAddress, to.uniqueAddress, Reachability.Unreachable, 1) - }.toIndexedSeq, unreachability.map { - case (from, _) => from.uniqueAddress -> 1L - }.toMap) + Reachability( + unreachability.map { + case (from, to) => Reachability.Record(from.uniqueAddress, to.uniqueAddress, Reachability.Unreachable, 1) + }.toIndexedSeq, + unreachability.map { + case (from, _) => from.uniqueAddress -> 1L + }.toMap) } def extSystem: ExtendedActorSystem = system.asInstanceOf[ExtendedActorSystem] @@ -1332,7 +1334,8 @@ class SplitBrainResolverSpec stop() } - "down minority partition" in new SetupKeepMajority(stableAfter = Duration.Zero, memberA.uniqueAddress, role = None) { + "down minority partition" in new SetupKeepMajority(stableAfter = Duration.Zero, memberA.uniqueAddress, + role = None) { memberUp(memberA, memberB, memberC, memberD, memberE) leader(memberA) reachabilityChanged(memberA -> memberB, memberC -> memberD) diff --git a/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala b/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala index e33b4a8068..e214a6e457 100644 --- a/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala +++ b/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala @@ -182,7 +182,6 @@ private[cluster] abstract class AutoDownBase(autoDownUnreachableAfter: FiniteDur } case _: ClusterDomainEvent => // not interested in other events - } def unreachableMember(m: Member): Unit = diff --git a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala index ada7a80350..6a02878055 100644 --- a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala @@ -279,7 +279,6 @@ case object Lookup { /** * Implement to provide a service discovery method - * */ abstract class ServiceDiscovery { @@ -310,7 +309,6 @@ abstract class ServiceDiscovery { * eagerness to wait for a result for this specific lookup. * * The returned future should be failed once resolveTimeout has passed with a [[DiscoveryTimeoutException]]. 
- * */ def lookup(query: Lookup, resolveTimeout: java.time.Duration): CompletionStage[Resolved] = { import scala.compat.java8.FutureConverters._ diff --git a/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala b/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala index 83631024c3..69885b5100 100644 --- a/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala +++ b/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala @@ -12,28 +12,28 @@ import scala.concurrent.ExecutionContext.Implicits.global object CompileOnlySpec { - //#loading + // #loading import akka.discovery.Discovery val system = ActorSystem() val serviceDiscovery = Discovery(system).discovery - //#loading + // #loading - //#basic + // #basic import akka.discovery.Lookup serviceDiscovery.lookup(Lookup("akka.io"), 1.second) // Convenience for a Lookup with only a serviceName serviceDiscovery.lookup("akka.io", 1.second) - //#basic + // #basic - //#full + // #full import akka.discovery.Lookup import akka.discovery.ServiceDiscovery.Resolved val lookup: Future[Resolved] = serviceDiscovery.lookup(Lookup("akka.io").withPortName("remoting").withProtocol("tcp"), 1.second) - //#full + // #full // compiler lookup.foreach(println) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala index cf7df72dc2..d6bd6eceb5 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala @@ -200,7 +200,7 @@ final class LWWMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, L // this class cannot be a `case class` because we need different `unapply` - override def toString: String = s"LWW$entries" //e.g. LWWMap(a -> 1, b -> 2) + override def toString: String = s"LWW$entries" // e.g. LWWMap(a -> 1, b -> 2) override def equals(o: Any): Boolean = o match { case other: LWWMap[_, _] => underlying == other.underlying diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala index 0ba42ba296..16020e65ba 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala @@ -445,8 +445,8 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( val keyDelta = putOp.underlying mergedKeys = mergedKeys.mergeDelta(keyDelta) mergedValues = mergedValues + putOp - .asInstanceOf[PutDeltaOp[A, B]] - .value // put is destructive and propagates only full values of B! + .asInstanceOf[PutDeltaOp[A, B]] + .value // put is destructive and propagates only full values of B! 
case removeOp: RemoveDeltaOp[_, _] => val removedKey = removeOp.underlying match { // if op is RemoveDeltaOp then it must have exactly one element in the elements @@ -461,8 +461,8 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( // removeKeyOp tombstones values for later use if (mergedValues.contains(removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey)) { tombstonedVals = tombstonedVals + (removeKeyOp - .asInstanceOf[RemoveKeyDeltaOp[A, B]] - .removedKey -> mergedValues(removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey)) + .asInstanceOf[RemoveKeyDeltaOp[A, B]] + .removedKey -> mergedValues(removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey)) } mergedValues = mergedValues - removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey mergedKeys = mergedKeys.mergeDelta(removeKeyOp.underlying) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala index 0aecf07e38..e016ccf28f 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala @@ -95,8 +95,9 @@ final class ORMultiMap[A, B] private[akka] ( */ def entries: Map[A, Set[B]] = if (withValueDeltas) - underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) => k -> v.elements } else - underlying.entries.map { case (k, v) => k -> v.elements } + underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) => k -> v.elements } + else + underlying.entries.map { case (k, v) => k -> v.elements } /** * Java API: All entries of a multimap where keys are strings and values are sets. @@ -107,7 +108,8 @@ final class ORMultiMap[A, B] private[akka] ( if (withValueDeltas) underlying.entries.foreach { case (k, v) => if (underlying.keys.elements.contains(k)) result.put(k, v.elements.asJava) - } else + } + else underlying.entries.foreach { case (k, v) => result.put(k, v.elements.asJava) } result } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala index b5802ee8cb..d4fb75b620 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala @@ -440,7 +440,8 @@ final class ORSet[A] private[akka] ( val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that) val entries0 = if (addDeltaOp) - entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } else { + entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } + else { val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains) ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00) } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index 8abab273d1..5c6292cc17 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -877,7 +877,7 @@ object Replicator { def key: Key[A] def request: Option[Any] - /** Java API*/ + /** Java API */ def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) } final case class DeleteSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends 
DeleteResponse[A] @@ -1111,10 +1111,10 @@ object Replicator { extends ReplicatorMessage with DestinationSystemUid { override def toString: String = - (digests + digests .map { case (key, bytes) => key + " -> " + bytes.map(byte => f"$byte%02x").mkString("") - }) + } .mkString("Status(", ", ", ")") } final case class Gossip( @@ -1379,7 +1379,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog new PayloadSizeAggregator(log, sizeExceeding, maxFrameSize) } - //Start periodic gossip to random nodes in cluster + // Start periodic gossip to random nodes in cluster import context.dispatcher val gossipTask = context.system.scheduler.scheduleWithFixedDelay(gossipInterval, gossipInterval, self, GossipTick) val notifyTask = @@ -2149,8 +2149,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } val chunk = (statusCount % totChunks).toInt val status = Status(dataEntries.collect { - case (key, (_, _)) if math.abs(key.hashCode % totChunks) == chunk => (key, getDigest(key)) - }, chunk, totChunks, toSystemUid, selfFromSystemUid) + case (key, (_, _)) if math.abs(key.hashCode % totChunks) == chunk => (key, getDigest(key)) + }, chunk, totChunks, toSystemUid, selfFromSystemUid) to ! status } } @@ -2167,7 +2167,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog log.debug( "Received gossip status from [{}], chunk [{}] of [{}] containing [{}].", replyTo.path.address, - (chunk + 1), + chunk + 1, totChunks, otherDigests.keys.mkString(", ")) @@ -2298,7 +2298,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def hasSubscriber(subscriber: ActorRef): Boolean = - subscribers.exists { case (_, s) => s.contains(subscriber) } || + subscribers.exists { case (_, s) => s.contains(subscriber) } || newSubscribers.exists { case (_, s) => s.contains(subscriber) } def receiveTerminated(ref: ActorRef): Unit = { @@ -2828,7 +2828,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog replyTo.tell(replyMsg, context.parent) context.stop(self) case _: ReadResult => - //collect late replies + // collect late replies remaining -= sender().path.address case SendToSecondary => case ReceiveTimeout => diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala index 50f8a92c9e..c5b5037631 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala @@ -92,8 +92,7 @@ private object ReplicatedDataSerializer { } sealed trait ProtoMapEntryWriter[ - Entry <: GeneratedMessageV3, - EntryBuilder <: GeneratedMessageV3.Builder[EntryBuilder], + Entry <: GeneratedMessageV3, EntryBuilder <: GeneratedMessageV3.Builder[EntryBuilder], Value <: GeneratedMessageV3] { def setStringKey(builder: EntryBuilder, key: String, value: Value): Entry def setLongKey(builder: EntryBuilder, key: Long, value: Value): Entry @@ -656,10 +655,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) * Convert a Map[A, B] to an Iterable[Entry] where Entry is the protobuf map entry. 
*/ private def getEntries[ - IKey, - IValue, - EntryBuilder <: GeneratedMessageV3.Builder[EntryBuilder], - PEntry <: GeneratedMessageV3, + IKey, IValue, EntryBuilder <: GeneratedMessageV3.Builder[EntryBuilder], PEntry <: GeneratedMessageV3, PValue <: GeneratedMessageV3]( input: Map[IKey, IValue], createBuilder: () => EntryBuilder, diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala index cd21a06b3f..073c4cb0d3 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala @@ -345,7 +345,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def getToProto(get: Get[_]): dm.Get = { val timoutInMillis = get.consistency.timeout.toMillis - require(timoutInMillis <= 0XFFFFFFFFL, "Timeouts must fit in a 32-bit unsigned int") + require(timoutInMillis <= 0xFFFFFFFFL, "Timeouts must fit in a 32-bit unsigned int") val b = dm.Get.newBuilder().setKey(otherMessageToProto(get.key)).setTimeout(timoutInMillis.toInt) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala index 3c5b98ad53..cfbc2b6577 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala @@ -99,13 +99,14 @@ trait SerializationSupport { .setUid2((uniqueAddress.longUid >> 32).toInt) def uniqueAddressFromProto(uniqueAddress: dm.UniqueAddress): UniqueAddress = - UniqueAddress(addressFromProto(uniqueAddress.getAddress), if (uniqueAddress.hasUid2) { - // new remote node join the two parts of the long uid back - (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0XFFFFFFFFL) - } else { - // old remote node - uniqueAddress.getUid.toLong - }) + UniqueAddress(addressFromProto(uniqueAddress.getAddress), + if (uniqueAddress.hasUid2) { + // new remote node join the two parts of the long uid back + (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0xFFFFFFFFL) + } else { + // old remote node + uniqueAddress.getUid.toLong + }) def versionVectorToProto(versionVector: VersionVector): dm.VersionVector = { val b = dm.VersionVector.newBuilder() diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala index 9d4f99a280..fea9d17eba 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala @@ -56,7 +56,8 @@ object DurableDataSpec { if (failStore) reply match { case Some(StoreReply(_, failureMsg, replyTo)) => replyTo ! failureMsg case None => - } else + } + else reply match { case Some(StoreReply(successMsg, _, replyTo)) => replyTo ! 
 successMsg
 case None =>
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala
index 8a5af20cbd..4a88702f11 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala
@@ -82,11 +82,11 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN
     val probe2 = TestProbe()(sys2)
     Cluster(sys2).join(node(first).address)
     awaitAssert({
-      Cluster(system).state.members.size should ===(4)
-      Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
-      Cluster(sys2).state.members.size should ===(4)
-      Cluster(sys2).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
-    }, 10.seconds)
+        Cluster(system).state.members.size should ===(4)
+        Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
+        Cluster(sys2).state.members.size should ===(4)
+        Cluster(sys2).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
+      }, 10.seconds)
     enterBarrier("joined")
 
     within(5.seconds) {
@@ -170,9 +170,9 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN
       cluster3.join(node(first).address)
 
       awaitAssert({
-        cluster.state.members.exists(m =>
-          m.uniqueAddress == cluster3.selfUniqueAddress && m.status == MemberStatus.Up) should ===(true)
-      }, 10.seconds)
+          cluster.state.members.exists(m =>
+            m.uniqueAddress == cluster3.selfUniqueAddress && m.status == MemberStatus.Up) should ===(true)
+        }, 10.seconds)
 
       within(10.seconds) {
         var values = Set.empty[Int]
@@ -189,9 +189,9 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN
       // all must at least have seen it as joining
       awaitAssert({
-        cluster3.state.members.size should ===(4)
-        cluster3.state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
-      }, 10.seconds)
+          cluster3.state.members.size should ===(4)
+          cluster3.state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
+        }, 10.seconds)
 
       // after merging with others
       replicator3 ! Get(KeyA, ReadAll(remainingOrDefault))
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala
index 01e8605d6f..afee11f760 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala
@@ -126,7 +126,7 @@ class JepsenInspiredInsertSpec
         writeProbe.receiveOne(3.seconds)
       }
       val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success }
-      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_]    => fail }
+      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail }
       successWriteAcks.map(_.request.get).toSet should be(myData.toSet)
       successWriteAcks.size should be(myData.size)
       failureWriteAcks should be(Nil)
@@ -159,7 +159,7 @@ class JepsenInspiredInsertSpec
         writeProbe.receiveOne(timeout + 1.second)
       }
       val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success }
-      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_]    => fail }
+      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail }
       successWriteAcks.map(_.request.get).toSet should be(myData.toSet)
       successWriteAcks.size should be(myData.size)
       failureWriteAcks should be(Nil)
@@ -171,7 +171,7 @@ class JepsenInspiredInsertSpec
         val readProbe = TestProbe()
         replicator.tell(Get(key, readMajority), readProbe.ref)
         val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) => g.get(key) }
-        //val survivors = result.elements.size
+        // val survivors = result.elements.size
         result.elements should be(expectedData)
       }
 
@@ -203,7 +203,7 @@ class JepsenInspiredInsertSpec
         writeProbe.receiveOne(3.seconds)
       }
       val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success }
-      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_]    => fail }
+      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail }
       successWriteAcks.map(_.request.get).toSet should be(myData.toSet)
       successWriteAcks.size should be(myData.size)
       failureWriteAcks should be(Nil)
@@ -248,7 +248,7 @@ class JepsenInspiredInsertSpec
         writeProbe.receiveOne(timeout + 1.second)
       }
       val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success }
-      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_]    => fail }
+      val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail }
       runOn(n1, n4, n5) {
         successWriteAcks.map(_.request.get).toSet should be(myData.toSet)
         successWriteAcks.size should be(myData.size)
@@ -257,7 +257,7 @@ class JepsenInspiredInsertSpec
       runOn(n2, n3) {
         // without delays all could theoretically have been written before the blackhole
         if (delayMillis != 0)
-          failureWriteAcks should not be (Nil)
+          failureWriteAcks should not be Nil
       }
 
       (successWriteAcks.size + failureWriteAcks.size) should be(myData.size)
@@ -268,7 +268,7 @@ class JepsenInspiredInsertSpec
         val readProbe = TestProbe()
         replicator.tell(Get(key, readMajority), readProbe.ref)
         val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) => g.get(key) }
-        //val survivors = result.elements.size
+        // val survivors = result.elements.size
         result.elements should be(expectedData)
       }
       // but on the 3 node side, read from majority doesn't mean that we are guaranteed to see
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
index 33557e38c3..1ff49d232b 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala
@@ -166,8 +166,8 @@ class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpe
       val n = 1000 * factor
       val expectedData = (0 until n).toSet
       repeat("ORSet Update WriteLocal", keys, n)({ (key, i, replyTo) =>
-        replicator.tell(Update(key, ORSet(), WriteLocal)(_ :+ i), replyTo)
-      }, key => awaitReplicated(key, expectedData))
+          replicator.tell(Update(key, ORSet(), WriteLocal)(_ :+ i), replyTo)
+        }, key => awaitReplicated(key, expectedData))
 
       enterBarrier("after-1")
     }
diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
index ee9e6c57b1..25fddd9384 100644
--- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
+++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala
@@ -451,9 +451,9 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec
       replicator.tell(Get(KeyE, readMajority), probe2.ref)
       probe2.expectMsgType[GetSuccess[_]]
       replicator.tell(Update(KeyE, GCounter(), writeMajority, None) { data =>
-        probe1.ref ! data.value
-        data :+ 1
-      }, probe2.ref)
+          probe1.ref ! data.value
+          data :+ 1
+        }, probe2.ref)
       // verify read your own writes, without waiting for the UpdateSuccess reply
       // note that the order of the replies are not defined, and therefore we use separate probes
       val probe3 = TestProbe()
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
index b1ea81356c..77f2f1a9fc 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala
@@ -21,9 +21,10 @@ object DeltaPropagationSelectorSpec {
       extends DeltaPropagationSelector {
     override val gossipIntervalDivisor = 5
     override def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation =
-      DeltaPropagation(selfUniqueAddress, false, deltas.map {
-        case (key, (d, fromSeqNr, toSeqNr)) => (key, Delta(DataEnvelope(d), fromSeqNr, toSeqNr))
-      })
+      DeltaPropagation(selfUniqueAddress, false,
+        deltas.map {
+          case (key, (d, fromSeqNr, toSeqNr)) => (key, Delta(DataEnvelope(d), fromSeqNr, toSeqNr))
+        })
     override def maxDeltaSize: Int = 10
   }
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala
index 77481e31e1..e22eee783c 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala
@@ -113,7 +113,6 @@ class LotsOfDataBot extends Actor with ActorLogging {
         }
       case _: UpdateResponse[_] => // ignore
-
       case c @ Changed(ORSetKey(id)) =>
         val elements = c.dataValue match {
           case ORSet(e) => e
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala
index dae105b8e5..2db0e68048 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala
@@ -69,7 +69,7 @@ class ORMapSpec extends AnyWordSpec with Matchers {
     "be able to remove entry" in {
       val m = ORMap().put(node1, "a", GSet() + "A").put(node1, "b", GSet() + "B").remove(node1, "a")
 
-      m.entries.keySet should not contain ("a")
+      m.entries.keySet should not contain "a"
       m.entries.keySet should contain("b")
     }
 
@@ -83,13 +83,13 @@ class ORMapSpec extends AnyWordSpec with Matchers {
       m1.entries.keySet should contain("a")
 
       val m2 = m1.mergeDelta(removeDelta)
-      m2.entries.keySet should not contain ("a")
+      m2.entries.keySet should not contain "a"
       m2.entries.keySet should contain("b")
     }
 
     "be able to add removed" in {
       val m = ORMap().put(node1, "a", GSet() + "A").put(node1, "b", GSet() + "B").remove(node1, "a")
-      m.entries.keySet should not contain ("a")
+      m.entries.keySet should not contain "a"
       m.entries.keySet should contain("b")
       val m2 = m.put(node1, "a", GSet() + "C")
       m2.entries.keySet should contain("a")
@@ -366,7 +366,7 @@ class ORMapSpec extends AnyWordSpec with Matchers {
         .updated(node1, "b", ORSet.empty[String])(_.add(node1, "B3"))
         .updated(node2, "b", ORSet.empty[String])(_.add(node2, "B4"))
 
-      val merged1 = (m1.merge(m2d)).mergeDelta(m2u.delta.get)
+      val merged1 = m1.merge(m2d).mergeDelta(m2u.delta.get)
 
       merged1.entries("a").elements should be(Set("A"))
       // note that B1 is lost as it was added and removed earlier in timeline than B2
@@ -391,7 +391,7 @@ class ORMapSpec extends AnyWordSpec with Matchers {
       merged2.entries("b").elements should be(Set("B3"))
       merged2.entries("c").elements should be(Set("C"))
 
-      val merged3 = (merged1.mergeDelta(m3.delta.get)).mergeDelta(m4.delta.get)
+      val merged3 = merged1.mergeDelta(m3.delta.get).mergeDelta(m4.delta.get)
 
       merged3.entries("a").elements should be(Set("A"))
       merged3.entries("b").elements should be(Set("B3"))
@@ -418,7 +418,7 @@ class ORMapSpec extends AnyWordSpec with Matchers {
       merged2.entries("b").elements should be(Set("B2", "B3"))
       merged2.entries("c").elements should be(Set("C"))
 
-      val merged3 = (merged1.mergeDelta(m3.delta.get)).mergeDelta(m4.delta.get)
+      val merged3 = merged1.mergeDelta(m3.delta.get).mergeDelta(m4.delta.get)
 
       merged3.entries("a").elements should be(Set("A"))
       merged3.entries("b").elements should be(Set("B2", "B3"))
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
index 4f83220da2..8f72ed7df2 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala
@@ -59,16 +59,16 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       val c4 = c3.remove(node1, user2)
       val c5 = c4.remove(node1, user1)
 
-      c5.elements should not contain (user1)
-      c5.elements should not contain (user2)
+      c5.elements should not contain user1
+      c5.elements should not contain user2
 
       val c6 = c3.merge(c5)
-      c6.elements should not contain (user1)
-      c6.elements should not contain (user2)
+      c6.elements should not contain user1
+      c6.elements should not contain user2
 
       val c7 = c5.merge(c3)
-      c7.elements should not contain (user1)
-      c7.elements should not contain (user2)
+      c7.elements should not contain user1
+      c7.elements should not contain user2
     }
 
     "be able to add removed" in {
@@ -77,7 +77,7 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       val c3 = c2.add(node1, user1)
       c3.elements should contain(user1)
       val c4 = c3.remove(node1, user1)
-      c4.elements should not contain (user1)
+      c4.elements should not contain user1
       val c5 = c4.add(node1, user1)
       c5.elements should contain(user1)
     }
@@ -88,7 +88,7 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       val c2 = c1.add(node1, user1)
       val c3 = c2.add(node1, user2)
       val c4 = c3.remove(node1, user1)
-      c4.elements should not contain (user1)
+      c4.elements should not contain user1
       c4.elements should contain(user2)
 
       val c5 = c4.add(node1, user1)
@@ -99,7 +99,7 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       val c7 = c6.remove(node1, user1)
       val c8 = c7.add(node1, user2)
       val c9 = c8.remove(node1, user1)
-      c9.elements should not contain (user1)
+      c9.elements should not contain user1
       c9.elements should contain(user2)
     }
 
@@ -112,20 +112,20 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       // set 2
       val c2 = ORSet().add(node2, user3).add(node2, user4).remove(node2, user3)
 
-      c2.elements should not contain (user3)
+      c2.elements should not contain user3
       c2.elements should contain(user4)
 
       // merge both ways
       val merged1 = c1.merge(c2)
       merged1.elements should contain(user1)
       merged1.elements should contain(user2)
-      merged1.elements should not contain (user3)
+      merged1.elements should not contain user3
       merged1.elements should contain(user4)
 
       val merged2 = c2.merge(c1)
       merged2.elements should contain(user1)
       merged2.elements should contain(user2)
-      merged2.elements should not contain (user3)
+      merged2.elements should not contain user3
       merged2.elements should contain(user4)
     }
 
@@ -133,29 +133,29 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       // set 1
       val c1 = ORSet().add(node1, user1).add(node1, user2).add(node1, user3).remove(node1, user1).remove(node1, user3)
 
-      c1.elements should not contain (user1)
+      c1.elements should not contain user1
       c1.elements should contain(user2)
-      c1.elements should not contain (user3)
+      c1.elements should not contain user3
 
       // set 2
       val c2 = ORSet().add(node2, user1).add(node2, user2).add(node2, user3).add(node2, user4).remove(node2, user3)
       c2.elements should contain(user1)
       c2.elements should contain(user2)
-      c2.elements should not contain (user3)
+      c2.elements should not contain user3
       c2.elements should contain(user4)
 
       // merge both ways
       val merged1 = c1.merge(c2)
       merged1.elements should contain(user1)
       merged1.elements should contain(user2)
-      merged1.elements should not contain (user3)
+      merged1.elements should not contain user3
       merged1.elements should contain(user4)
 
       val merged2 = c2.merge(c1)
       merged2.elements should contain(user1)
       merged2.elements should contain(user2)
-      merged2.elements should not contain (user3)
+      merged2.elements should not contain user3
       merged2.elements should contain(user4)
     }
 
@@ -169,19 +169,19 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       val c2 = c1.add(node2, user1).remove(node2, user2).remove(node2, user3)
       c2.elements should contain(user1)
-      c2.elements should not contain (user2)
-      c2.elements should not contain (user3)
+      c2.elements should not contain user2
+      c2.elements should not contain user3
 
       // merge both ways
       val merged1 = c1.merge(c2)
       merged1.elements should contain(user1)
-      merged1.elements should not contain (user2)
-      merged1.elements should not contain (user3)
+      merged1.elements should not contain user2
+      merged1.elements should not contain user3
 
       val merged2 = c2.merge(c1)
       merged2.elements should contain(user1)
-      merged2.elements should not contain (user2)
-      merged2.elements should not contain (user3)
+      merged2.elements should not contain user2
+      merged2.elements should not contain user3
 
       val c3 = c1.add(node1, user4).remove(node1, user3).add(node1, user2)
 
@@ -189,13 +189,13 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       val merged3 = c2.merge(c3)
       merged3.elements should contain(user1)
       merged3.elements should contain(user2)
-      merged3.elements should not contain (user3)
+      merged3.elements should not contain user3
       merged3.elements should contain(user4)
 
       val merged4 = c3.merge(c2)
       merged4.elements should contain(user1)
       merged4.elements should contain(user2)
-      merged4.elements should not contain (user3)
+      merged4.elements should not contain user3
       merged4.elements should contain(user4)
     }
 
@@ -206,23 +206,23 @@ class ORSetSpec extends AnyWordSpec with Matchers {
       // merge both ways
       val merged1 = c1.merge(c2)
       merged1.elements should contain(user1)
-      merged1.elements should not contain (user2)
+      merged1.elements should not contain user2
 
       val merged2 = c2.merge(c1)
       merged2.elements should contain(user1)
-      merged2.elements should not contain (user2)
+      merged2.elements should not contain user2
 
       val c3 = c1.add(node1, user3)
 
       // merge both ways
       val merged3 = c3.merge(c2)
       merged3.elements should contain(user1)
-      merged3.elements should not contain (user2)
+      merged3.elements should not contain user2
       merged3.elements should contain(user3)
 
       val merged4 = c2.merge(c3)
       merged4.elements should contain(user1)
-      merged4.elements should not contain (user2)
+      merged4.elements should not contain user2
       merged4.elements should contain(user3)
     }
diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
index 3cee7a41fe..7d6b1d47ca 100644
--- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
+++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala
@@ -154,7 +154,7 @@ class ReplicatedDataSerializerSpec
       }
       val numberOfBytes = checkSerialization(largeSet)
       info(s"size of GSet with ${largeSet.size} elements: $numberOfBytes bytes")
-      numberOfBytes should be <= (80000)
+      numberOfBytes should be <= 80000
     }
 
     "serialize large ORSet" in {
@@ -170,7 +170,7 @@ class ReplicatedDataSerializerSpec
       val numberOfBytes = checkSerialization(largeSet)
       // note that ORSet is compressed, and therefore smaller than GSet
       info(s"size of ORSet with ${largeSet.size} elements: $numberOfBytes bytes")
-      numberOfBytes should be <= (50000)
+      numberOfBytes should be <= 50000
     }
 
     "serialize Flag" in {
diff --git a/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/AsSubscriber.scala b/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/AsSubscriber.scala
index fd0862cac9..86cb919e8a 100644
--- a/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/AsSubscriber.scala
+++ b/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/AsSubscriber.scala
@@ -15,29 +15,28 @@ import akka.stream.scaladsl.JavaFlowSupport;
 //#imports
 
 object AsSubscriber {
-    case class Row(name: String)
+  case class Row(name: String)
 
-    class DatabaseClient {
-      def fetchRows(): Publisher[Row] = ???
-    }
+  class DatabaseClient {
+    def fetchRows(): Publisher[Row] = ???
+  }
 
-    val databaseClient: DatabaseClient = ???;
+  val databaseClient: DatabaseClient = ???;
 
-    // #example
-    val rowSource: Source[Row, NotUsed] =
-      JavaFlowSupport.Source.asSubscriber
-        .mapMaterializedValue(
-          (subscriber: Subscriber[Row]) => {
-            // For each materialization, fetch the rows from the database:
-            val rows: Publisher[Row] = databaseClient.fetchRows()
-            rows.subscribe(subscriber)
-            NotUsed
-          });
+  // #example
+  val rowSource: Source[Row, NotUsed] =
+    JavaFlowSupport.Source.asSubscriber
+      .mapMaterializedValue((subscriber: Subscriber[Row]) => {
+        // For each materialization, fetch the rows from the database:
+        val rows: Publisher[Row] = databaseClient.fetchRows()
+        rows.subscribe(subscriber)
+        NotUsed
+      });
 
-    val names: Source[String, NotUsed] =
-      // rowSource can be re-used, since it will start a new
-      // query for each materialization, fully supporting backpressure
-      // for each materialized stream:
-      rowSource.map(row => row.name);
-    //#example
+  val names: Source[String, NotUsed] =
+    // rowSource can be re-used, since it will start a new
+    // query for each materialization, fully supporting backpressure
+    // for each materialized stream:
+    rowSource.map(row => row.name);
+  // #example
 }
diff --git a/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/FromPublisher.scala b/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/FromPublisher.scala
index 9d4e966556..0234b2ef80 100644
--- a/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/FromPublisher.scala
+++ b/akka-docs/src/test/scala-jdk9-only/docs/stream/operators/source/FromPublisher.scala
@@ -15,20 +15,20 @@ import akka.stream.scaladsl.JavaFlowSupport;
 //#imports
 
 object FromPublisher {
-    case class Row(name: String)
+  case class Row(name: String)
 
-    class DatabaseClient {
-      def fetchRows(): Publisher[Row] = ???
-    }
+  class DatabaseClient {
+    def fetchRows(): Publisher[Row] = ???
+  }
 
-    val databaseClient: DatabaseClient = ???
+  val databaseClient: DatabaseClient = ???
 
-    // #example
-    val names: Source[String, NotUsed] =
-      // A new subscriber will subscribe to the supplied publisher for each
-      // materialization, so depending on whether the database client supports
-      // this the Source can be materialized more than once.
-      JavaFlowSupport.Source.fromPublisher(databaseClient.fetchRows())
-        .map(row => row.name);
-    //#example
+  // #example
+  val names: Source[String, NotUsed] =
+    // A new subscriber will subscribe to the supplied publisher for each
+    // materialization, so depending on whether the database client supports
+    // this the Source can be materialized more than once.
+    JavaFlowSupport.Source.fromPublisher(databaseClient.fetchRows())
+      .map(row => row.name);
+  // #example
 }
diff --git a/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala b/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala
index 748edbd8aa..1f4b7c5d9b 100644
--- a/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala
@@ -42,11 +42,11 @@ final case class Message(s: String)
 //#context-actorOf
 class FirstActor extends Actor {
   val child = context.actorOf(Props[MyActor](), name = "myChild")
-  //#plus-some-behavior
+  // #plus-some-behavior
  def receive = {
     case x => sender() ! x
   }
-  //#plus-some-behavior
+  // #plus-some-behavior
 }
 //#context-actorOf
 
@@ -68,7 +68,7 @@ object ValueClassActor {
 //#actor-with-value-class-argument
 
 class DemoActorWrapper extends Actor {
-  //#props-factory
+  // #props-factory
   object DemoActor {
 
     /**
@@ -91,19 +91,19 @@ class DemoActorWrapper extends Actor {
   // Props(new DemoActor(42)) would not be safe
   context.actorOf(DemoActor.props(42), "demo")
   // ...
-    //#props-factory
+    // #props-factory
     def receive = {
       case msg =>
     }
-    //#props-factory
+    // #props-factory
   }
-  //#props-factory
+  // #props-factory
  def receive = Actor.emptyBehavior
 }
 
 class ActorWithMessagesWrapper {
-  //#messages-in-companion
+  // #messages-in-companion
   object MyActor {
     case class Greeting(from: String)
     case object Goodbye
@@ -115,32 +115,32 @@ class ActorWithMessagesWrapper {
       case Goodbye => log.info("Someone said goodbye to me.")
     }
   }
-  //#messages-in-companion
+  // #messages-in-companion
  def receive = Actor.emptyBehavior
 }
 
 class Hook extends Actor {
   var child: ActorRef = _
-  //#preStart
+  // #preStart
   override def preStart(): Unit = {
     child = context.actorOf(Props[MyActor](), "child")
   }
-  //#preStart
+  // #preStart
  def receive = Actor.emptyBehavior
-  //#postStop
+  // #postStop
   override def postStop(): Unit = {
-    //#clean-up-some-resources
+    // #clean-up-some-resources
     ()
-    //#clean-up-some-resources
+    // #clean-up-some-resources
   }
-  //#postStop
+  // #postStop
 }
 
 class ReplyException extends Actor {
   def receive = {
     case _ =>
-      //#reply-exception
+      // #reply-exception
       try {
         val result = operation()
         sender() ! result
@@ -149,7 +149,7 @@ class ReplyException extends Actor {
           sender() ! akka.actor.Status.Failure(e)
           throw e
       }
-      //#reply-exception
+      // #reply-exception
   }
 
   def operation(): String = { "Hi" }
@@ -157,7 +157,7 @@
 }
 
 class StoppingActorsWrapper {
-  //#stoppingActors-actor
+  // #stoppingActors-actor
   class MyActor extends Actor {
     val child: ActorRef = ???
@@ -172,7 +172,7 @@ class StoppingActorsWrapper {
     }
 
   }
-  //#stoppingActors-actor
+  // #stoppingActors-actor
 }
 
 //#gracefulStop-actor
@@ -215,10 +215,10 @@ class Swapper extends Actor {
     case Swap =>
       log.info("Hi")
      become({
-        case Swap =>
-          log.info("Ho")
-          unbecome() // resets the latest 'become' (just for fun)
-      }, discardOld = false) // push on top instead of replace
+          case Swap =>
+            log.info("Ho")
+            unbecome() // resets the latest 'become' (just for fun)
+        }, discardOld = false) // push on top instead of replace
   }
 }
 
@@ -325,7 +325,7 @@ class ActorDocSpec extends AkkaSpec("""
   "import context" in {
     new AnyRef {
-      //#import-context
+      // #import-context
       class FirstActor extends Actor {
         import context._
         val myActor = actorOf(Props[MyActor](), name = "myactor")
@@ -333,7 +333,7 @@ class ActorDocSpec extends AkkaSpec("""
           case x => myActor ! x
         }
       }
-      //#import-context
+      // #import-context
 
       val first = system.actorOf(Props(classOf[FirstActor], this), name = "first")
       system.stop(first)
@@ -366,7 +366,7 @@ class ActorDocSpec extends AkkaSpec("""
   }
 
   "run basic Ping Pong" in {
-    //#fiddle_code
+    // #fiddle_code
     val system = ActorSystem("pingpong")
 
     val pinger = system.actorOf(Props[Pinger](), "pinger")
@@ -378,7 +378,7 @@ class ActorDocSpec extends AkkaSpec("""
     ponger ! Ping
     }
-    //#fiddle_code
+    // #fiddle_code
 
     val testProbe = new TestProbe(system)
     testProbe.watch(pinger)
@@ -389,46 +389,46 @@ class ActorDocSpec extends AkkaSpec("""
   }
 
   "instantiates a case class" in {
-    //#immutable-message-instantiation
+    // #immutable-message-instantiation
     val user = User("Mike")
     // create a new case class message
     val message = Register(user)
-    //#immutable-message-instantiation
+    // #immutable-message-instantiation
   }
 
   "use poison pill" in {
     val victim = system.actorOf(Props[MyActor]())
-    //#poison-pill
+    // #poison-pill
     watch(victim)
     victim ! PoisonPill
-    //#poison-pill
+    // #poison-pill
     expectTerminated(victim)
   }
 
   "creating a Props config" in {
-    //#creating-props
+    // #creating-props
     import akka.actor.Props
 
     val props1 = Props[MyActor]()
     val props2 = Props(new ActorWithArgs("arg")) // careful, see below
     val props3 = Props(classOf[ActorWithArgs], "arg") // no support for value class arguments
-    //#creating-props
+    // #creating-props
 
-    //#creating-props-deprecated
+    // #creating-props-deprecated
     // NOT RECOMMENDED within another actor:
     // encourages to close over enclosing class
     val props7 = Props(new MyActor)
-    //#creating-props-deprecated
+    // #creating-props-deprecated
   }
 
   "creating actor with Props" in {
-    //#system-actorOf
+    // #system-actorOf
     import akka.actor.ActorSystem
 
     // ActorSystem is a heavy object: create only one per application
     val system = ActorSystem("mySystem")
     val myActor = system.actorOf(Props[MyActor](), "myactor2")
-    //#system-actorOf
+    // #system-actorOf
     shutdown(system)
   }
 
@@ -438,31 +438,31 @@ class ActorDocSpec extends AkkaSpec("""
       case n: Int =>
        sender() ! name
      case message =>
        val target = testActor
-        //#forward
+        // #forward
        target.forward(message)
-        //#forward
+        // #forward
     }
   }
 
   val a: { def actorRef: ActorRef } = new AnyRef {
     val applicationContext = this
-    //#creating-indirectly
+    // #creating-indirectly
     import akka.actor.IndirectActorProducer
 
     class DependencyInjector(applicationContext: AnyRef, beanName: String) extends IndirectActorProducer {
 
       override def actorClass = classOf[Actor]
       override def produce() =
-        //#obtain-fresh-Actor-instance-from-DI-framework
+        // #obtain-fresh-Actor-instance-from-DI-framework
         new Echo(beanName)
 
       def this(beanName: String) = this("", beanName)
-      //#obtain-fresh-Actor-instance-from-DI-framework
+      // #obtain-fresh-Actor-instance-from-DI-framework
     }
 
     val actorRef = system.actorOf(Props(classOf[DependencyInjector], applicationContext, "hello"), "helloBean")
-    //#creating-indirectly
+    // #creating-indirectly
   }
 
   val actorRef = {
     import scala.language.reflectiveCalls
@@ -471,9 +471,9 @@ class ActorDocSpec extends AkkaSpec("""
     val message = 42
     implicit val self = testActor
-    //#tell
+    // #tell
     actorRef ! message
-    //#tell
+    // #tell
     expectMsg("hello")
     actorRef ! "huhu"
     expectMsg("huhu")
@@ -481,29 +481,29 @@ class ActorDocSpec extends AkkaSpec("""
 
   "using implicit timeout" in {
     val myActor = system.actorOf(Props[FirstActor]())
-    //#using-implicit-timeout
+    // #using-implicit-timeout
     import scala.concurrent.duration._
     import akka.util.Timeout
     import akka.pattern.ask
     implicit val timeout: Timeout = 5.seconds
     val future = myActor ? "hello"
-    //#using-implicit-timeout
+    // #using-implicit-timeout
     Await.result(future, timeout.duration) should be("hello")
   }
 
   "using explicit timeout" in {
     val myActor = system.actorOf(Props[FirstActor]())
-    //#using-explicit-timeout
+    // #using-explicit-timeout
     import scala.concurrent.duration._
     import akka.pattern.ask
     val future = myActor.ask("hello")(5 seconds)
-    //#using-explicit-timeout
+    // #using-explicit-timeout
     Await.result(future, 5 seconds) should be("hello")
   }
 
   "using receiveTimeout" in {
-    //#receive-timeout
+    // #receive-timeout
     import akka.actor.ReceiveTimeout
     import scala.concurrent.duration._
     class MyActor extends Actor {
@@ -519,10 +519,10 @@ class ActorDocSpec extends AkkaSpec("""
           throw new RuntimeException("Receive timed out")
       }
     }
-    //#receive-timeout
+    // #receive-timeout
   }
 
-  //#hot-swap-actor
+  // #hot-swap-actor
  class HotSwapActor extends Actor {
     import context._
     def angry: Receive = {
@@ -540,35 +540,35 @@ class ActorDocSpec extends AkkaSpec("""
       case "bar" => become(happy)
     }
   }
-  //#hot-swap-actor
+  // #hot-swap-actor
 
   "using hot-swap" in {
     val actor = system.actorOf(Props(classOf[HotSwapActor], this), name = "hot")
   }
 
   "using Stash" in {
-    //#stash
+    // #stash
     import akka.actor.Stash
     class ActorWithProtocol extends Actor with Stash {
       def receive = {
         case "open" =>
           unstashAll()
          context.become({
-            case "write" => // do writing...
-            case "close" =>
-              unstashAll()
-              context.unbecome()
-            case msg => stash()
-          }, discardOld = false) // stack on top instead of replacing
+              case "write" => // do writing...
+              case "close" =>
+                unstashAll()
+                context.unbecome()
+              case msg => stash()
+            }, discardOld = false) // stack on top instead of replacing
         case msg => stash()
       }
     }
-    //#stash
+    // #stash
   }
 
   "using watch" in {
     new AnyRef {
-      //#watch
+      // #watch
      import akka.actor.{ Actor, Props, Terminated }
 
      class WatchActor extends Actor {
@@ -584,7 +584,7 @@ class ActorDocSpec extends AkkaSpec("""
             lastSender ! "finished"
         }
       }
-      //#watch
+      // #watch
 
       val victim = system.actorOf(Props(classOf[WatchActor], this))
       victim.tell("kill", testActor)
@@ -597,7 +597,7 @@ class ActorDocSpec extends AkkaSpec("""
     implicit val sender = testActor
     val context = this
 
-    //#kill
+    // #kill
     context.watch(victim) // watch the Actor to receive Terminated message once it dies
 
     victim ! Kill
@@ -605,31 +605,31 @@ class ActorDocSpec extends AkkaSpec("""
     expectMsgPF(hint = "expecting victim to terminate") {
       case Terminated(v) if v == victim => v // the Actor has indeed terminated
     }
-    //#kill
+    // #kill
   }
 
   "demonstrate ActorSelection" in {
     val context = system
-    //#selection-local
+    // #selection-local
     // will look up this absolute path
     context.actorSelection("/user/serviceA/aggregator")
     // will look up sibling beneath same supervisor
     context.actorSelection("../joe")
-    //#selection-local
-    //#selection-wildcard
+    // #selection-local
+    // #selection-wildcard
     // will look all children to serviceB with names starting with worker
     context.actorSelection("/user/serviceB/worker*")
     // will look up all siblings beneath same supervisor
     context.actorSelection("../*")
-    //#selection-wildcard
-    //#selection-remote
+    // #selection-wildcard
+    // #selection-remote
     context.actorSelection("akka://app@otherhost:1234/user/serviceB")
-    //#selection-remote
+    // #selection-remote
   }
 
   "using Identify" in {
     new AnyRef {
-      //#identify
+      // #identify
      import akka.actor.{ Actor, ActorIdentity, Identify, Props, Terminated }
 
      class Follower extends Actor {
@@ -648,7 +648,7 @@ class ActorDocSpec extends AkkaSpec("""
         case Terminated(`another`) => context.stop(self)
        }
      }
-      //#identify
+      // #identify
 
       val a = system.actorOf(Props.empty)
       val b = system.actorOf(Props(classOf[Follower], this))
@@ -660,7 +660,7 @@ class ActorDocSpec extends AkkaSpec("""
 
   "using pattern gracefulStop" in {
     val actorRef = system.actorOf(Props[Manager]())
-    //#gracefulStop
+    // #gracefulStop
     import akka.pattern.gracefulStop
     import scala.concurrent.Await
 
@@ -672,12 +672,12 @@ class ActorDocSpec extends AkkaSpec("""
       // the actor wasn't stopped within 5 seconds
       case e: akka.pattern.AskTimeoutException =>
     }
-    //#gracefulStop
+    // #gracefulStop
   }
 
   "using pattern ask / pipeTo" in {
     val actorA, actorB, actorC, actorD = system.actorOf(Props.empty)
-    //#ask-pipeTo
+    // #ask-pipeTo
     import akka.pattern.{ ask, pipe }
     import system.dispatcher // The ExecutionContext that will be used
     final case class Result(x: Int, s: String, d: Double)
@@ -694,20 +694,20 @@ class ActorDocSpec extends AkkaSpec("""
     f.pipeTo(actorD) // .. or ..
     pipe(f) to actorD
-    //#ask-pipeTo
+    // #ask-pipeTo
   }
 
   class Replier extends Actor {
     def receive = {
       case ref: ActorRef =>
-        //#reply-with-sender
+        // #reply-with-sender
         sender().tell("reply", context.parent) // replies will go back to parent
         sender().!("reply")(context.parent) // alternative syntax
-        //#reply-with-sender
+        // #reply-with-sender
       case x =>
-        //#reply-without-sender
+        // #reply-without-sender
         sender() ! x // replies will go to this actor
-        //#reply-without-sender
+        // #reply-without-sender
     }
   }
 
@@ -729,13 +729,13 @@ class ActorDocSpec extends AkkaSpec("""
     { // https://github.com/akka/akka/issues/29056
       val someActor = system.actorOf(Props(classOf[Replier], this))
       someActor ! PoisonPill
-      //#coordinated-shutdown-addActorTerminationTask
+      // #coordinated-shutdown-addActorTerminationTask
       CoordinatedShutdown(system).addActorTerminationTask(
         CoordinatedShutdown.PhaseBeforeServiceUnbind,
         "someTaskName",
        someActor,
        Some("stop"))
-      //#coordinated-shutdown-addActorTerminationTask
+      // #coordinated-shutdown-addActorTerminationTask
     }
   }
diff --git a/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala
index 812eee2641..435b5a594a 100644
--- a/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala
@@ -13,7 +13,7 @@ import akka.serialization.SerializerWithStringManifest
 
 class ByteBufferSerializerDocSpec {
 
-  //#bytebufserializer-with-manifest
+  // #bytebufserializer-with-manifest
   class ExampleByteBufSerializer extends SerializerWithStringManifest with ByteBufferSerializer {
     override def identifier: Int = 1337
     override def manifest(o: AnyRef): String = "naive-toStringImpl"
@@ -38,6 +38,6 @@ class ByteBufferSerializerDocSpec {
     override def toBinary(o: AnyRef, buf: ByteBuffer): Unit = ??? // implement actual logic here
     override def fromBinary(buf: ByteBuffer, manifest: String): AnyRef = ??? // implement actual logic here
   }
-  //#bytebufserializer-with-manifest
+  // #bytebufserializer-with-manifest
 
 }
diff --git a/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala b/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala
index a39a9d8651..ea99798376 100644
--- a/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala
@@ -15,9 +15,9 @@ import scala.collection.immutable
 object FSMDocSpec {
   // messages and data types
-  //#test-code
+  // #test-code
   import akka.actor.ActorRef
-  //#simple-events
+  // #simple-events
   // received events
   final case class SetTarget(ref: ActorRef)
   final case class Queue(obj: Any)
@@ -25,8 +25,8 @@ object FSMDocSpec {
 
   // sent events
   final case class Batch(obj: immutable.Seq[Any])
-  //#simple-events
-  //#simple-state
+  // #simple-events
+  // #simple-state
   // states
   sealed trait State
   case object Idle extends State
@@ -35,32 +35,32 @@ object FSMDocSpec {
   sealed trait Data
   case object Uninitialized extends Data
   final case class Todo(target: ActorRef, queue: immutable.Seq[Any]) extends Data
-  //#simple-state
-  //#test-code
+  // #simple-state
+  // #test-code
 }
 
 class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
   import FSMDocSpec._
 
-  //#fsm-code-elided
-  //#simple-imports
+  // #fsm-code-elided
+  // #simple-imports
   import akka.actor.{ ActorRef, FSM }
   import scala.concurrent.duration._
-  //#simple-imports
-  //#simple-fsm
+  // #simple-imports
+  // #simple-fsm
   class Buncher extends FSM[State, Data] {
-    //#fsm-body
+    // #fsm-body
     startWith(Idle, Uninitialized)
 
-    //#when-syntax
+    // #when-syntax
     when(Idle) {
       case Event(SetTarget(ref), Uninitialized) =>
         stay().using(Todo(ref, Vector.empty))
     }
-    //#when-syntax
+    // #when-syntax
 
-    //#transition-elided
+    // #transition-elided
     onTransition {
       case Active -> Idle =>
         stateData match {
@@ -68,16 +68,16 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
           case _ => // nothing to do
         }
     }
-    //#transition-elided
-    //#when-syntax
+    // #transition-elided
+    // #when-syntax
     when(Active, stateTimeout = 1 second) {
       case Event(Flush | StateTimeout, t: Todo) =>
         goto(Idle).using(t.copy(queue = Vector.empty))
     }
-    //#when-syntax
+    // #when-syntax
 
-    //#unhandled-elided
+    // #unhandled-elided
     whenUnhandled {
       // common code for both states
       case Event(Queue(obj), t @ Todo(_, v)) =>
@@ -87,12 +87,12 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
         log.warning("received unhandled request {} in state {}/{}", e, stateName, s)
         stay()
     }
-    //#unhandled-elided
-    //#fsm-body
+    // #unhandled-elided
+    // #fsm-body
     initialize()
   }
-  //#simple-fsm
+  // #simple-fsm
   object DemoCode {
     trait StateType
     case object SomeState extends StateType
@@ -107,47 +107,47 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
       object WillDo
       object Tick
 
-      //#modifier-syntax
+      // #modifier-syntax
       when(SomeState) {
        case Event(msg, _) =>
          goto(Processing).using(newData).forMax(5 seconds).replying(WillDo)
      }
-      //#modifier-syntax
+      // #modifier-syntax
 
-      //#transition-syntax
+      // #transition-syntax
       onTransition {
        case Idle -> Active => startTimerWithFixedDelay("timeout", Tick, 1 second)
        case Active -> _    => cancelTimer("timeout")
        case x -> Idle      => log.info("entering Idle from " + x)
      }
-      //#transition-syntax
+      // #transition-syntax
 
-      //#alt-transition-syntax
+      // #alt-transition-syntax
       onTransition(handler _)
 
      def handler(from: StateType, to: StateType): Unit = {
        // handle it here ...
      }
-      //#alt-transition-syntax
+      // #alt-transition-syntax
 
-      //#stop-syntax
+      // #stop-syntax
       when(Error) {
        case Event("stop", _) =>
          // do cleanup ...
          stop()
      }
-      //#stop-syntax
+      // #stop-syntax
 
-      //#transform-syntax
+      // #transform-syntax
       when(SomeState)(transform {
        case Event(bytes: ByteString, read) => stay().using(read + bytes.length)
      }.using {
        case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 =>
          goto(Processing)
      })
-      //#transform-syntax
+      // #transform-syntax
 
-      //#alt-transform-syntax
+      // #alt-transform-syntax
       val processingTrigger: PartialFunction[State, State] = {
        case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 =>
          goto(Processing)
@@ -156,17 +156,17 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
       when(SomeState)(transform {
        case Event(bytes: ByteString, read) => stay().using(read + bytes.length)
      }.using(processingTrigger))
-      //#alt-transform-syntax
+      // #alt-transform-syntax
 
-      //#termination-syntax
+      // #termination-syntax
       onTermination {
        case StopEvent(FSM.Normal, state, data)         => // ...
        case StopEvent(FSM.Shutdown, state, data)       => // ...
        case StopEvent(FSM.Failure(cause), state, data) => // ...
      }
-      //#termination-syntax
+      // #termination-syntax
 
-      //#unhandled-syntax
+      // #unhandled-syntax
       whenUnhandled {
        case Event(x: X, data) =>
          log.info("Received unhandled event: " + x)
@@ -175,14 +175,14 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
          log.warning("Received unknown event: " + msg)
          goto(Error)
      }
-      //#unhandled-syntax
+      // #unhandled-syntax
     }
 
-    //#logging-fsm
+    // #logging-fsm
     import akka.actor.LoggingFSM
     class MyFSM extends LoggingFSM[StateType, Data] {
-      //#body-elided
+      // #body-elided
       override def logDepth = 12
       onTermination {
         case StopEvent(FSM.Failure(_), state, data) =>
@@ -192,21 +192,21 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
             "Events leading up to this point:\n\t" + lastEvents)
       }
       // ...
-      //#body-elided
+      // #body-elided
     }
-    //#logging-fsm
+    // #logging-fsm
   }
-  //#fsm-code-elided
+  // #fsm-code-elided
 
   "simple finite state machine" must {
 
     "demonstrate NullFunction" in {
       class A extends FSM[Int, Null] {
         val SomeState = 0
-        //#NullFunction
+        // #NullFunction
         when(SomeState)(FSM.NullFunction)
-        //#NullFunction
+        // #NullFunction
       }
     }
diff --git a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala
index 708ea759d8..12a174f8a8 100644
--- a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala
@@ -17,13 +17,13 @@ import akka.testkit.{ EventFilter, ImplicitSender, TestKit }
 //#testkit
 object FaultHandlingDocSpec {
-  //#supervisor
-  //#child
+  // #supervisor
+  // #child
   import akka.actor.Actor
-  //#child
+  // #child
 
   class Supervisor extends Actor {
-    //#strategy
+    // #strategy
     import akka.actor.OneForOneStrategy
     import akka.actor.SupervisorStrategy._
     import scala.concurrent.duration._
@@ -35,17 +35,17 @@ object FaultHandlingDocSpec {
       case _: IllegalArgumentException => Stop
       case _: Exception                => Escalate
     }
-    //#strategy
+    // #strategy
 
     def receive = {
       case p: Props => sender() ! context.actorOf(p)
     }
   }
-  //#supervisor
+  // #supervisor
 
-  //#supervisor2
+  // #supervisor2
   class Supervisor2 extends Actor {
-    //#strategy2
+    // #strategy2
     import akka.actor.OneForOneStrategy
     import akka.actor.SupervisorStrategy._
     import scala.concurrent.duration._
@@ -57,7 +57,7 @@ object FaultHandlingDocSpec {
       case _: IllegalArgumentException => Stop
       case _: Exception                => Escalate
     }
-    //#strategy2
+    // #strategy2
 
     def receive = {
       case p: Props => sender() ! context.actorOf(p)
@@ -65,10 +65,10 @@ object FaultHandlingDocSpec {
     // override default to kill all children during restart
     override def preRestart(cause: Throwable, msg: Option[Any]): Unit = {}
   }
-  //#supervisor2
+  // #supervisor2
 
   class Supervisor3 extends Actor {
-    //#default-strategy-fallback
+    // #default-strategy-fallback
     import akka.actor.OneForOneStrategy
     import akka.actor.SupervisorStrategy._
     import scala.concurrent.duration._
@@ -79,12 +79,12 @@ object FaultHandlingDocSpec {
       case t =>
         super.supervisorStrategy.decider.applyOrElse(t, (_: Any) => Escalate)
     }
-    //#default-strategy-fallback
+    // #default-strategy-fallback
 
     def receive = Actor.emptyBehavior
   }
 
-  //#child
+  // #child
   class Child extends Actor {
     var state = 0
     def receive = {
@@ -93,7 +93,7 @@ object FaultHandlingDocSpec {
       case "get" => sender() ! state
     }
   }
-  //#child
+  // #child
 
   val testConf: Config = ConfigFactory.parseString("""
       akka {
@@ -126,16 +126,16 @@ class FaultHandlingDocSpec(_system: ActorSystem)
   "A supervisor" must {
     "apply the chosen strategy for its child" in {
-      //#testkit
+      // #testkit
 
-      //#create
+      // #create
       val supervisor = system.actorOf(Props[Supervisor](), "supervisor")
 
       supervisor ! Props[Child]()
       val child = expectMsgType[ActorRef] // retrieve answer from TestKit’s testActor
-      //#create
+      // #create
 
       EventFilter.warning(occurrences = 1).intercept {
-        //#resume
+        // #resume
         child ! 42 // set state to 42
         child ! "get"
         expectMsg(42)
@@ -143,24 +143,24 @@ class FaultHandlingDocSpec(_system: ActorSystem)
         child ! new ArithmeticException // crash it
         child ! "get"
         expectMsg(42)
-        //#resume
+        // #resume
       }
       EventFilter[NullPointerException](occurrences = 1).intercept {
-        //#restart
+        // #restart
         child ! new NullPointerException // crash it harder
         child ! "get"
         expectMsg(0)
-        //#restart
+        // #restart
       }
       EventFilter[IllegalArgumentException](occurrences = 1).intercept {
-        //#stop
+        // #stop
         watch(child) // have testActor watch “child”
         child ! new IllegalArgumentException // break it
         expectMsgPF() { case Terminated(`child`) => () }
-        //#stop
+        // #stop
       }
       EventFilter[Exception]("CRASH", occurrences = 2).intercept {
-        //#escalate-kill
+        // #escalate-kill
         supervisor ! Props[Child]() // create new child
         val child2 = expectMsgType[ActorRef]
         watch(child2)
@@ -171,8 +171,8 @@ class FaultHandlingDocSpec(_system: ActorSystem)
         expectMsgPF() {
           case t @ Terminated(`child2`) if t.existenceConfirmed => ()
         }
-        //#escalate-kill
-        //#escalate-restart
+        // #escalate-kill
+        // #escalate-restart
         val supervisor2 = system.actorOf(Props[Supervisor2](), "supervisor2")
 
         supervisor2 ! Props[Child]()
@@ -185,9 +185,9 @@ class FaultHandlingDocSpec(_system: ActorSystem)
         child3 ! new Exception("CRASH")
         child3 ! "get"
         expectMsg(0)
-        //#escalate-restart
+        // #escalate-restart
       }
-      //#testkit
+      // #testkit
       // code here
     }
   }
diff --git a/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala b/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala
index ebd7d174bd..e1a77f4cc0 100644
--- a/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala
@@ -14,7 +14,7 @@ object InitializationDocSpec {
       case _ => // Ignore
     }
 
-    //#preStartInit
+    // #preStartInit
     override def preStart(): Unit = {
       // Initialize children here
     }
@@ -30,11 +30,11 @@ object InitializationDocSpec {
       // Keep the call to postStop(), but no stopping of children
       postStop()
     }
-    //#preStartInit
+    // #preStartInit
   }
 
   class MessageInitExample extends Actor {
-    //#messageInit
+    // #messageInit
     var initializeMe: Option[String] = None
 
     override def receive = {
@@ -47,7 +47,7 @@ object InitializationDocSpec {
     def initialized: Receive = {
       case "U OK?" => initializeMe.foreach { sender() ! _ }
     }
-    //#messageInit
+    // #messageInit
   }
 }
diff --git a/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala b/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala
index f100486135..b66414058d 100644
--- a/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala
@@ -15,16 +15,16 @@ case class MyValueClass(v: Int) extends AnyVal
 
 class PropsEdgeCaseSpec extends AnyWordSpec with CompileOnlySpec {
   "value-class-edge-case-example" in compileOnlySpec {
-    //#props-edge-cases-value-class-example
+    // #props-edge-cases-value-class-example
     class ValueActor(value: MyValueClass) extends Actor {
       def receive = {
         case multiplier: Long => sender() ! (value.v * multiplier)
       }
     }
     val valueClassProp = Props(classOf[ValueActor], MyValueClass(5)) // Unsupported
-    //#props-edge-cases-value-class-example
+    // #props-edge-cases-value-class-example
 
-    //#props-edge-cases-default-values
+    // #props-edge-cases-default-values
     class DefaultValueActor(a: Int, b: Int = 5) extends Actor {
       def receive = {
         case x: Int => sender() ! ((a + x) * b)
@@ -40,6 +40,6 @@ class PropsEdgeCaseSpec extends AnyWordSpec with CompileOnlySpec {
     }
     val defaultValueProp2 = Props[DefaultValueActor2]() // Unsupported
     val defaultValueProp3 = Props(classOf[DefaultValueActor2]) // Unsupported
-    //#props-edge-cases-default-values
+    // #props-edge-cases-default-values
   }
 }
diff --git a/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala
index 26a0ee3cd8..9eb803ba86 100644
--- a/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala
@@ -17,46 +17,46 @@ import akka.testkit._
 class SchedulerDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
   "schedule a one-off task" in {
-    //#schedule-one-off-message
-    //Use the system's dispatcher as ExecutionContext
+    // #schedule-one-off-message
+    // Use the system's dispatcher as ExecutionContext
     import system.dispatcher
 
-    //Schedules to send the "foo"-message to the testActor after 50ms
+    // Schedules to send the "foo"-message to the testActor after 50ms
     system.scheduler.scheduleOnce(50 milliseconds, testActor, "foo")
-    //#schedule-one-off-message
+    // #schedule-one-off-message
 
     expectMsg(1 second, "foo")
 
-    //#schedule-one-off-thunk
-    //Schedules a function to be executed (send a message to the testActor) after 50ms
+    // #schedule-one-off-thunk
+    // Schedules a function to be executed (send a message to the testActor) after 50ms
     system.scheduler.scheduleOnce(50 milliseconds) {
       testActor ! System.currentTimeMillis
     }
-    //#schedule-one-off-thunk
+    // #schedule-one-off-thunk
 
   }
 
   "schedule a recurring task" in {
     new AnyRef {
-      //#schedule-recurring
+      // #schedule-recurring
       val Tick = "tick"
       class TickActor extends Actor {
         def receive = {
-          case Tick => //Do something
+          case Tick => // Do something
         }
       }
       val tickActor = system.actorOf(Props(classOf[TickActor], this))
 
-      //Use system's dispatcher as ExecutionContext
+      // Use system's dispatcher as ExecutionContext
       import system.dispatcher
 
-      //This will schedule to send the Tick-message
-      //to the tickActor after 0ms repeating every 50ms
+      // This will schedule to send the Tick-message
+      // to the tickActor after 0ms repeating every 50ms
       val cancellable =
         system.scheduler.scheduleWithFixedDelay(Duration.Zero, 50.milliseconds, tickActor, Tick)
 
-      //This cancels further Ticks to be sent
+      // This cancels further Ticks to be sent
       cancellable.cancel()
-      //#schedule-recurring
+      // #schedule-recurring
       system.stop(tickActor)
     }
   }
diff --git a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala
index 2678fbba01..9ef3bde372 100644
--- a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala
@@ -6,7 +6,7 @@ package docs.actor
 
 class SharedMutableStateDocSpec {
 
-  //#mutable-state
+  // #mutable-state
   import akka.actor.{ Actor, ActorRef }
   import akka.pattern.ask
   import akka.util.Timeout
@@ -76,5 +76,5 @@ class SharedMutableStateDocSpec {
         Future { expensiveCalculation(currentSender) }
     }
   }
-  //#mutable-state
+  // #mutable-state
 }
diff --git a/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala
index 66cce641e2..e5d86b2d6b 100644
--- a/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala
@@ -5,7 +5,7 @@ package docs.actor
 
 object TimerDocSpec {
-  //#timers
+  // #timers
   import scala.concurrent.duration._
 
   import akka.actor.Actor
@@ -29,5 +29,5 @@ object TimerDocSpec {
       // do something useful here
     }
   }
-  //#timers
+  // #timers
 }
diff --git a/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala b/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala
index f21e0f0345..d45efe721a 100644
--- a/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala
+++ b/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala
@@ -16,36 +16,36 @@ import scala.collection.mutable.ListBuffer
  */
 class UnnestedReceives extends Actor {
   import context.become
-  //If you need to store sender/senderFuture you can change it to ListBuffer[(Any, Channel)]
+  // If you need to store sender/senderFuture you can change it to ListBuffer[(Any, Channel)]
   val queue = new ListBuffer[Any]()
 
-  //This message processes a message/event
+  // This message processes a message/event
   def process(msg: Any): Unit = println("processing: " + msg)
-  //This method subscribes the actor to the event bus
-  def subscribe(): Unit = {} //Your external stuff
-  //This method retrieves all prior messages/events
+  // This method subscribes the actor to the event bus
+  def subscribe(): Unit = {} // Your external stuff
+  // This method retrieves all prior messages/events
   def allOldMessages() = List()
 
   override def preStart(): Unit = {
-    //We override preStart to be sure that the first message the actor gets is
-    //Replay, that message will start to be processed _after_ the actor is started
+    // We override preStart to be sure that the first message the actor gets is
+    // Replay, that message will start to be processed _after_ the actor is started
     self ! "Replay"
-    //Then we subscribe to the stream of messages/events
+    // Then we subscribe to the stream of messages/events
     subscribe()
   }
 
   def receive = {
-    case "Replay" => //Our first message should be a Replay message, all others are invalid
-      allOldMessages().foreach(process) //Process all old messages/events
-      become { //Switch behavior to look for the GoAhead signal
-        case "GoAhead" => //When we get the GoAhead signal we process all our buffered messages/events
+    case "Replay" => // Our first message should be a Replay message, all others are invalid
+      allOldMessages().foreach(process) // Process all old messages/events
+      become { // Switch behavior to look for the GoAhead signal
+        case "GoAhead" => // When we get the GoAhead signal we process all our buffered messages/events
           queue.foreach(process)
           queue.clear()
-          become { //Then we change behavior to process incoming messages/events as they arrive
+          become { // Then we change behavior to process incoming messages/events as they arrive
            case msg => process(msg)
          }
-        case msg => //While we haven't gotten the GoAhead signal, buffer all incoming messages
-          queue += msg //Here you have full control, you can handle overflow etc
+        case msg => // While we haven't gotten the GoAhead signal, buffer all incoming messages
+          queue += msg // Here you have full control, you can handle overflow etc
       }
   }
 }
diff --git a/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala b/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala
index 03c5a17697..41ad117de0 100644
--- a/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala
+++ b/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala
@@ -20,30 +20,30 @@ object DnsCompileOnlyDocSpec {
   implicit val timeout: Timeout = Timeout(1.second)
   val actorRef: ActorRef = ???
- //#resolve + // #resolve val initial: Option[Dns.Resolved] = Dns(system).cache.resolve("google.com")(system, actorRef) val cached: Option[Dns.Resolved] = Dns(system).cache.cached("google.com") - //#resolve + // #resolve { - //#actor-api-inet-address + // #actor-api-inet-address val resolved: Future[Dns.Resolved] = (IO(Dns) ? Dns.Resolve("google.com")).mapTo[Dns.Resolved] - //#actor-api-inet-address + // #actor-api-inet-address } { - //#actor-api-async + // #actor-api-async val resolved: Future[DnsProtocol.Resolved] = (IO(Dns) ? DnsProtocol.Resolve("google.com")).mapTo[DnsProtocol.Resolved] - //#actor-api-async + // #actor-api-async } { - //#srv + // #srv val resolved: Future[DnsProtocol.Resolved] = (IO(Dns) ? DnsProtocol.Resolve("your-service", Srv)).mapTo[DnsProtocol.Resolved] - //#srv + // #srv } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala b/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala index e35e8cbf6e..b7126d3bd1 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala @@ -14,7 +14,7 @@ object BlockingActor { // DO NOT DO THIS HERE: this is an example of incorrect code, // better alternatives are described further on. - //block for 5 seconds, representing blocking I/O, etc + // block for 5 seconds, representing blocking I/O, etc Thread.sleep(5000) println(s"Blocking operation finished: $i") Behaviors.same diff --git a/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala b/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala index 54c462b3ce..be1af12030 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala @@ -25,7 +25,7 @@ object BlockingFutureActor { def triggerFutureBlockingOperation(i: Int)(implicit ec: ExecutionContext): Future[Unit] = { println(s"Calling blocking Future: $i") Future { - Thread.sleep(5000) //block for 5 seconds + Thread.sleep(5000) // block for 5 seconds println(s"Blocking future finished $i") } } @@ -48,7 +48,7 @@ object SeparateDispatcherFutureActor { def triggerFutureBlockingOperation(i: Int)(implicit ec: ExecutionContext): Future[Unit] = { println(s"Calling blocking Future: $i") Future { - Thread.sleep(5000) //block for 5 seconds + Thread.sleep(5000) // block for 5 seconds println(s"Blocking future finished $i") } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala b/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala index c269e65927..637dc6fcba 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala @@ -16,7 +16,7 @@ import scala.concurrent.duration._ class CoordinatedActorShutdownSpec { - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask object MyActor { trait Messages @@ -33,19 +33,19 @@ class CoordinatedActorShutdownSpec { } } - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask trait Message def root: Behavior[Message] = Behaviors.setup[Message] { context => implicit val system = context.system val myActor = context.spawn(MyActor.behavior, "my-actor") - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask CoordinatedShutdown(context.system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "someTaskName") { () => implicit val timeout: Timeout = 
5.seconds myActor.ask(MyActor.Stop(_)) } - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask Behaviors.empty @@ -56,7 +56,7 @@ class CoordinatedActorShutdownSpec { def cleanup(): Unit = {} import system.executionContext - //#coordinated-shutdown-cancellable + // #coordinated-shutdown-cancellable val c: Cancellable = CoordinatedShutdown(system).addCancellableTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "cleanup") { () => Future { @@ -67,17 +67,17 @@ class CoordinatedActorShutdownSpec { // much later... c.cancel() - //#coordinated-shutdown-cancellable + // #coordinated-shutdown-cancellable - //#coordinated-shutdown-jvm-hook + // #coordinated-shutdown-jvm-hook CoordinatedShutdown(system).addJvmShutdownHook { println("custom JVM shutdown hook...") } - //#coordinated-shutdown-jvm-hook + // #coordinated-shutdown-jvm-hook // don't run this def dummy(): Unit = { - //#coordinated-shutdown-run + // #coordinated-shutdown-run // shut down with `ActorSystemTerminateReason` system.terminate() @@ -85,7 +85,7 @@ class CoordinatedActorShutdownSpec { case object UserInitiatedShutdown extends CoordinatedShutdown.Reason val done: Future[Done] = CoordinatedShutdown(system).run(UserInitiatedShutdown) - //#coordinated-shutdown-run + // #coordinated-shutdown-run } } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala b/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala index 92165aca08..b9d58c088c 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala @@ -11,26 +11,26 @@ object DispatcherDocSpec { val context: ActorContext[Integer] = ??? { - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code import akka.actor.typed.DispatcherSelector val myActor = context.spawn(PrintActor(), "PrintActor", DispatcherSelector.fromConfig("PrintActor")) - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code } { - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher import akka.actor.typed.DispatcherSelector val myActor = context.spawn(PrintActor(), "PrintActor", DispatcherSelector.fromConfig("blocking-io-dispatcher")) - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher } { - //#lookup + // #lookup // for use with Futures, Scheduler, etc. 
import akka.actor.typed.DispatcherSelector implicit val executionContext = context.system.dispatchers.lookup(DispatcherSelector.fromConfig("my-dispatcher")) - //#lookup + // #lookup } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala b/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala index f59e3f4031..b256bc2309 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala @@ -28,7 +28,7 @@ class SharedMutableStateDocSpec { new MyActor(context) } } - //#mutable-state + // #mutable-state class MyActor(context: ActorContext[MyActor.Command]) extends AbstractBehavior[MyActor.Command](context) { import MyActor._ @@ -90,5 +90,5 @@ class SharedMutableStateDocSpec { this } } - //#mutable-state + // #mutable-state } diff --git a/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala index 571cbc4867..308e4f4299 100644 --- a/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala @@ -27,9 +27,9 @@ class DangerousActor extends Actor with ActorLogging { def notifyMeOnOpen(): Unit = log.warning("My CircuitBreaker is now open, and will not close for one minute") - //#circuit-breaker-initialization + // #circuit-breaker-initialization - //#circuit-breaker-usage + // #circuit-breaker-usage def dangerousCall: String = "This really isn't that dangerous of a call after all" def receive = { @@ -38,7 +38,7 @@ class DangerousActor extends Actor with ActorLogging { case "block for me" => sender() ! breaker.withSyncCircuitBreaker(dangerousCall) } - //#circuit-breaker-usage + // #circuit-breaker-usage } @@ -52,7 +52,7 @@ class TellPatternActor(recipient: ActorRef) extends Actor with ActorLogging { def notifyMeOnOpen(): Unit = log.warning("My CircuitBreaker is now open, and will not close for one minute") - //#circuit-breaker-tell-pattern + // #circuit-breaker-tell-pattern import akka.actor.ReceiveTimeout def receive = { @@ -69,12 +69,12 @@ class TellPatternActor(recipient: ActorRef) extends Actor with ActorLogging { breaker.fail() } } - //#circuit-breaker-tell-pattern + // #circuit-breaker-tell-pattern } class EvenNoFailureActor extends Actor { import context.dispatcher - //#even-no-as-failure + // #even-no-as-failure def luckyNumber(): Future[Int] = { val evenNumberAsFailure: Try[Int] => Boolean = { case Success(n) => n % 2 == 0 @@ -87,7 +87,7 @@ class EvenNoFailureActor extends Actor { // this call will return 8888 and increase failure count at the same time breaker.withCircuitBreaker(Future(8888), evenNumberAsFailure) } - //#even-no-as-failure + // #even-no-as-failure override def receive = { case x: Int => diff --git a/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala b/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala index 33606f5d2d..cbcebd06aa 100644 --- a/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala @@ -21,15 +21,15 @@ object ClusterDocSpec { class ClusterDocSpec extends AkkaSpec(ClusterDocSpec.config) with CompileOnlySpec { "demonstrate leave" in compileOnlySpec { - //#leave + // #leave val cluster = Cluster(system) cluster.leave(cluster.selfAddress) - //#leave + // #leave } "demonstrate data center" in compileOnlySpec { { - //#dcAccess + // #dcAccess val 
cluster = Cluster(system)
 
     // this node's data center
     val dc = cluster.selfDataCenter
@@ -38,19 +38,19 @@ class ClusterDocSpec extends AkkaSpec(ClusterDocSpec.config) with CompileOnlySpe
       // a specific member's data center
       val aMember = cluster.state.members.head
       val aDc = aMember.dataCenter
-      //#dcAccess
+      // #dcAccess
     }
   }
 
   "demonstrate programmatic joining to seed nodes" in compileOnlySpec {
-    //#join-seed-nodes
+    // #join-seed-nodes
     import akka.actor.Address
    import akka.cluster.Cluster
 
     val cluster = Cluster(system)
-    val list: List[Address] = ??? //your method to dynamically get seed nodes
+    val list: List[Address] = ??? // your method to dynamically get seed nodes
     cluster.joinSeedNodes(list)
-    //#join-seed-nodes
+    // #join-seed-nodes
   }
 }
diff --git a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala
index e21f33101d..7d11c6966c 100644
--- a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala
+++ b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala
@@ -56,18 +56,18 @@ object FactorialFrontend {
     val system = ActorSystem("ClusterSystem", config)
     system.log.info("Factorials will start when 2 backend members are in the cluster.")
-    //#registerOnUp
+    // #registerOnUp
     Cluster(system).registerOnMemberUp {
       system.actorOf(Props(classOf[FactorialFrontend], upToN, true), name = "factorialFrontend")
     }
-    //#registerOnUp
+    // #registerOnUp
   }
 }
 
 // not used, only for documentation
 abstract class FactorialFrontend2 extends Actor {
-  //#router-lookup-in-code
+  // #router-lookup-in-code
   import akka.cluster.routing.ClusterRouterGroup
   import akka.cluster.routing.ClusterRouterGroupSettings
   import akka.cluster.metrics.AdaptiveLoadBalancingGroup
@@ -83,12 +83,12 @@ abstract class FactorialFrontend2 extends Actor {
         useRoles = Set("backend"))).props(),
     name = "factorialBackendRouter2")
-  //#router-lookup-in-code
+  // #router-lookup-in-code
 }
 
 // not used, only for documentation
 abstract class FactorialFrontend3 extends Actor {
-  //#router-deploy-in-code
+  // #router-deploy-in-code
   import akka.cluster.routing.ClusterRouterPool
   import akka.cluster.routing.ClusterRouterPoolSettings
   import akka.cluster.metrics.AdaptiveLoadBalancingPool
@@ -103,5 +103,5 @@ abstract class FactorialFrontend3 extends Actor {
         allowLocalRoutees = false, useRoles = Set("backend"))).props(Props[FactorialBackend]()),
     name = "factorialBackendRouter3")
-  //#router-deploy-in-code
+  // #router-deploy-in-code
 }
diff --git a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala
index 800c1b999d..62397e0e34 100644
--- a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala
+++ b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala
@@ -15,9 +15,9 @@ class SimpleClusterListener extends Actor with ActorLogging {
 
   // subscribe to cluster changes, re-subscribe when restart
   override def preStart(): Unit = {
-    //#subscribe
+    // #subscribe
     cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
-    //#subscribe
+    // #subscribe
   }
 
   override def postStop(): Unit = cluster.unsubscribe(self)
diff --git a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala
index 236b56585d..24c5049345 100644
--- a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala
+++ b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala
@@ -10,25 +10,25 @@ import akka.cluster.ClusterEvent._ class SimpleClusterListener2 extends Actor with ActorLogging { - //#join + // #join val cluster = Cluster(context.system) - //#join + // #join // subscribe to cluster changes, re-subscribe when restart override def preStart(): Unit = { - //#join + // #join cluster.join(cluster.selfAddress) - //#join + // #join - //#subscribe + // #subscribe cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember]) - //#subscribe + // #subscribe - //#register-on-memberup + // #register-on-memberup cluster.registerOnMemberUp { cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember]) } - //#register-on-memberup + // #register-on-memberup } override def postStop(): Unit = cluster.unsubscribe(self) diff --git a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala index b6adfd5408..fa4ff486ab 100644 --- a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala +++ b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala @@ -19,7 +19,7 @@ import akka.actor.Actor abstract class ClusterSingletonSupervision extends Actor { import akka.actor.{ ActorRef, Props, SupervisorStrategy } def createSingleton(name: String, props: Props, supervisorStrategy: SupervisorStrategy): ActorRef = { - //#singleton-supervisor-actor-usage + // #singleton-supervisor-actor-usage import akka.actor.{ PoisonPill, Props } import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings } context.system.actorOf( @@ -28,6 +28,6 @@ abstract class ClusterSingletonSupervision extends Actor { terminationMessage = PoisonPill, settings = ClusterSingletonManagerSettings(context.system)), name = name) - //#singleton-supervisor-actor-usage + // #singleton-supervisor-actor-usage } } diff --git a/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala b/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala index dea8d43802..309e948386 100644 --- a/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala +++ b/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala @@ -18,14 +18,14 @@ class ConfigDocSpec extends AnyWordSpec with Matchers { val rootBehavior = Behaviors.empty[String] def compileOnlyCustomConfig(): Unit = { - //#custom-config + // #custom-config val customConf = ConfigFactory.parseString(""" akka.log-config-on-start = on """) // ConfigFactory.load sandwiches customConfig between default reference // config and default overrides, and then resolves it. 
val system = ActorSystem(rootBehavior, "MySystem", ConfigFactory.load(customConf)) - //#custom-config + // #custom-config } def compileOnlyPrintConfig(): Unit = { diff --git a/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala b/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala index 918173bda2..eec7b46bde 100644 --- a/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala +++ b/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala @@ -68,20 +68,20 @@ class LeaseDocSpec extends AkkaSpec(LeaseDocSpec.config) { "A docs lease" should { "scala lease be loadable from scala" in { - //#lease-usage + // #lease-usage val lease = LeaseProvider(system).getLease("", "docs-lease", "owner") val acquired: Future[Boolean] = lease.acquire() val stillAcquired: Boolean = lease.checkLease() val released: Future[Boolean] = lease.release() - //#lease-usage + // #lease-usage - //#lost-callback + // #lost-callback lease.acquire(leaseLostReason => doSomethingImportant(leaseLostReason)) - //#lost-callback + // #lost-callback - //#cluster-owner + // #cluster-owner val owner = Cluster(system).selfAddress.hostPort - //#cluster-owner + // #cluster-owner // remove compiler warnings blackhole(acquired, stillAcquired, released, owner) diff --git a/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala b/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala index 42cea259a5..361fca6e98 100644 --- a/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala +++ b/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala @@ -44,7 +44,7 @@ object DistributedDataDocSpec { #//#japi-serializer-config """ - //#data-bot + // #data-bot import java.util.concurrent.ThreadLocalRandom import akka.actor.Actor import akka.actor.ActorLogging @@ -86,7 +86,6 @@ object DistributedDataDocSpec { } case _: UpdateResponse[_] => // ignore - case c @ Changed(DataKey) => val data = c.get(DataKey) log.info("Current elements: {}", data.elements) @@ -95,7 +94,7 @@ object DistributedDataDocSpec { override def postStop(): Unit = tickTask.cancel() } - //#data-bot + // #data-bot } @@ -106,7 +105,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val probe = TestProbe() implicit val self: ActorRef = probe.ref - //#update + // #update implicit val node: SelfUniqueAddress = DistributedData(system).selfUniqueAddress val replicator = DistributedData(system).replicator @@ -125,21 +124,21 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val writeAll = WriteAll(timeout = 5.seconds) replicator ! 
Update(ActiveFlagKey, Flag.Disabled, writeAll)(_.switchOn) - //#update + // #update probe.expectMsgType[UpdateResponse[_]] match { - //#update-response1 + // #update-response1 case UpdateSuccess(Counter1Key, req) => // ok - //#update-response1 + // #update-response1 case unexpected => fail("Unexpected response: " + unexpected) } probe.expectMsgType[UpdateResponse[_]] match { - //#update-response2 + // #update-response2 case UpdateSuccess(Set1Key, req) => // ok case UpdateTimeout(Set1Key, req) => // write to 3 nodes failed within 1.second - //#update-response2 + // #update-response2 case UpdateSuccess(Set2Key, None) => case unexpected => fail("Unexpected response: " + unexpected) } @@ -151,7 +150,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { implicit val self: ActorRef = probe.ref def sender() = self - //#update-request-context + // #update-request-context implicit val node = DistributedData(system).selfUniqueAddress val replicator = DistributedData(system).replicator val writeTwo = WriteTo(n = 2, timeout = 3.second) @@ -168,14 +167,14 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { case UpdateTimeout(Counter1Key, Some(replyTo: ActorRef)) => replyTo ! "nack" } - //#update-request-context + // #update-request-context } "demonstrate get" in { val probe = TestProbe() implicit val self: ActorRef = probe.ref - //#get + // #get val replicator = DistributedData(system).replicator val Counter1Key = PNCounterKey("counter1") val Set1Key = GSetKey[String]("set1") @@ -192,25 +191,25 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val readAll = ReadAll(timeout = 5.seconds) replicator ! Get(ActiveFlagKey, readAll) - //#get + // #get probe.expectMsgType[GetResponse[_]] match { - //#get-response1 + // #get-response1 case g @ GetSuccess(Counter1Key, req) => val value = g.get(Counter1Key).value case NotFound(Counter1Key, req) => // key counter1 does not exist - //#get-response1 + // #get-response1 case unexpected => fail("Unexpected response: " + unexpected) } probe.expectMsgType[GetResponse[_]] match { - //#get-response2 + // #get-response2 case g @ GetSuccess(Set1Key, req) => val elements = g.get(Set1Key).elements case GetFailure(Set1Key, req) => // read from 3 nodes failed within 1.second case NotFound(Set1Key, req) => // key set1 does not exist - //#get-response2 + // #get-response2 case g @ GetSuccess(Set2Key, None) => val elements = g.get(Set2Key).elements case unexpected => fail("Unexpected response: " + unexpected) @@ -223,7 +222,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { implicit val self: ActorRef = probe.ref def sender() = self - //#get-request-context + // #get-request-context implicit val node = DistributedData(system).selfUniqueAddress val replicator = DistributedData(system).replicator val readTwo = ReadFrom(n = 2, timeout = 3.second) @@ -242,7 +241,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { case NotFound(Counter1Key, Some(replyTo: ActorRef)) => replyTo ! 
0L } - //#get-request-context + // #get-request-context } "demonstrate subscribe" in { @@ -251,7 +250,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { implicit val self: ActorRef = probe.ref def sender() = self - //#subscribe + // #subscribe val replicator = DistributedData(system).replicator val Counter1Key = PNCounterKey("counter1") // subscribe to changes of the Counter1Key value @@ -265,14 +264,14 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { // incoming request to retrieve current value of the counter sender() ! currentValue } - //#subscribe + // #subscribe } "demonstrate delete" in { val probe = TestProbe() implicit val self: ActorRef = probe.ref - //#delete + // #delete val replicator = DistributedData(system).replicator val Counter1Key = PNCounterKey("counter1") val Set2Key = ORSetKey[String]("set2") @@ -281,12 +280,12 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val writeMajority = WriteMajority(timeout = 5.seconds) replicator ! Delete(Set2Key, writeMajority) - //#delete + // #delete } "demonstrate PNCounter" in { def println(o: Any): Unit = () - //#pncounter + // #pncounter implicit val node = DistributedData(system).selfUniqueAddress val c0 = PNCounter.empty @@ -294,12 +293,12 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val c2 = c1 :+ 7 val c3: PNCounter = c2.decrement(2) println(c3.value) // 6 - //#pncounter + // #pncounter } "demonstrate PNCounterMap" in { def println(o: Any): Unit = () - //#pncountermap + // #pncountermap implicit val node = DistributedData(system).selfUniqueAddress val m0 = PNCounterMap.empty[String] val m1 = m0.increment(node, "a", 7) @@ -307,35 +306,35 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val m3 = m2.increment(node, "b", 1) println(m3.get("a")) // 5 m3.entries.foreach { case (key, value) => println(s"$key -> $value") } - //#pncountermap + // #pncountermap } "demonstrate GSet" in { def println(o: Any): Unit = () - //#gset + // #gset val s0 = GSet.empty[String] val s1 = s0 + "a" val s2 = s1 + "b" + "c" if (s2.contains("a")) println(s2.elements) // a, b, c - //#gset + // #gset } "demonstrate ORSet" in { def println(o: Any): Unit = () - //#orset + // #orset implicit val node = DistributedData(system).selfUniqueAddress val s0 = ORSet.empty[String] val s1 = s0 :+ "a" val s2 = s1 :+ "b" val s3 = s2.remove("a") println(s3.elements) // b - //#orset + // #orset } "demonstrate ORMultiMap" in { def println(o: Any): Unit = () - //#ormultimap + // #ormultimap implicit val node = DistributedData(system).selfUniqueAddress val m0 = ORMultiMap.empty[String, Int] val m1 = m0 :+ ("a" -> Set(1, 2, 3)) @@ -343,32 +342,32 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val m3 = m2.removeBinding(node, "a", 2) val m4 = m3.addBinding(node, "b", 1) println(m4.entries) - //#ormultimap + // #ormultimap } "demonstrate Flag" in { def println(o: Any): Unit = () - //#flag + // #flag val f0 = Flag.Disabled val f1 = f0.switchOn println(f1.enabled) - //#flag + // #flag } "demonstrate LWWRegister" in { def println(o: Any): Unit = () - //#lwwregister + // #lwwregister implicit val node = DistributedData(system).selfUniqueAddress val r1 = LWWRegister.create("Hello") val r2 = r1.withValueOf("Hi") println(s"${r1.value} by ${r1.updatedBy} at ${r1.timestamp}") - //#lwwregister + // #lwwregister r2.value should be("Hi") } "demonstrate LWWRegister with custom clock" in { def 
println(o: Any): Unit = () - //#lwwregister-custom-clock + // #lwwregister-custom-clock case class Record(version: Int, name: String, address: String) implicit val node = DistributedData(system).selfUniqueAddress @@ -385,7 +384,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val r3 = r1.merge(r2) println(r3.value) - //#lwwregister-custom-clock + // #lwwregister-custom-clock r3.value.address should be("Madison Square") } diff --git a/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala b/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala index b230587ac0..d0fec41016 100644 --- a/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala +++ b/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala @@ -26,11 +26,11 @@ object ShoppingCart { final case class Cart(items: Set[LineItem]) final case class LineItem(productId: String, title: String, quantity: Int) - //#read-write-majority + // #read-write-majority private val timeout = 3.seconds private val readMajority = ReadMajority(timeout) private val writeMajority = WriteMajority(timeout) - //#read-write-majority + // #read-write-majority } @@ -49,7 +49,7 @@ class ShoppingCart(userId: String) extends Actor { .orElse[Any, Unit](receiveRemoveItem) .orElse[Any, Unit](receiveOther) - //#get-cart + // #get-cart def receiveGetCart: Receive = { case GetCart => replicator ! Get(DataKey, readMajority, Some(sender())) @@ -66,9 +66,9 @@ class ShoppingCart(userId: String) extends Actor { // ReadMajority failure, try again with local read replicator ! Get(DataKey, ReadLocal, Some(replyTo)) } - //#get-cart + // #get-cart - //#add-item + // #add-item def receiveAddItem: Receive = { case cmd @ AddItem(item) => val update = Update(DataKey, LWWMap.empty[String, LineItem], writeMajority, Some(cmd)) { cart => @@ -76,7 +76,7 @@ class ShoppingCart(userId: String) extends Actor { } replicator ! 
update } - //#add-item + // #add-item def updateCart(data: LWWMap[String, LineItem], item: LineItem): LWWMap[String, LineItem] = data.get(item.productId) match { @@ -85,7 +85,7 @@ class ShoppingCart(userId: String) extends Actor { case None => data :+ (item.productId -> item) } - //#remove-item + // #remove-item def receiveRemoveItem: Receive = { case cmd @ RemoveItem(productId) => // Try to fetch latest from a majority of nodes first, since ORMap @@ -106,7 +106,7 @@ class ShoppingCart(userId: String) extends Actor { case NotFound(DataKey, Some(RemoveItem(productId))) => // nothing to remove } - //#remove-item + // #remove-item def receiveOther: Receive = { case _: UpdateSuccess[_] | _: UpdateTimeout[_] => diff --git a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala index cdb39bcc36..4fea2ccf80 100644 --- a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala +++ b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala @@ -62,7 +62,7 @@ class TwoPhaseSetSerializer(val system: ExtendedActorSystem) extends Serializer //#serializer class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) extends TwoPhaseSetSerializer(system) { - //#compression + // #compression override def toBinary(obj: AnyRef): Array[Byte] = obj match { case m: TwoPhaseSet => compress(twoPhaseSetToProto(m)) case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass}") @@ -71,5 +71,5 @@ class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) extends override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { twoPhaseSetFromBinary(decompress(bytes)) } - //#compression + // #compression } diff --git a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala index 75484d1110..c920120bab 100644 --- a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala @@ -229,7 +229,7 @@ object DispatcherDocSpec { """ - //#prio-mailbox + // #prio-mailbox import akka.dispatch.PriorityGenerator import akka.dispatch.UnboundedStablePriorityMailbox import com.typesafe.config.Config @@ -252,13 +252,13 @@ object DispatcherDocSpec { // We default to 1, which is in between high and low case otherwise => 1 }) - //#prio-mailbox + // #prio-mailbox - //#control-aware-mailbox-messages + // #control-aware-mailbox-messages import akka.dispatch.ControlMessage case object MyControlMessage extends ControlMessage - //#control-aware-mailbox-messages + // #control-aware-mailbox-messages class MyActor extends Actor { def receive = { @@ -266,23 +266,23 @@ object DispatcherDocSpec { } } - //#required-mailbox-class + // #required-mailbox-class import akka.dispatch.RequiresMessageQueue import akka.dispatch.BoundedMessageQueueSemantics class MyBoundedActor extends MyActor with RequiresMessageQueue[BoundedMessageQueueSemantics] - //#required-mailbox-class + // #required-mailbox-class - //#require-mailbox-on-actor + // #require-mailbox-on-actor class MySpecialActor extends Actor with RequiresMessageQueue[MyUnboundedMessageQueueSemantics] { - //#require-mailbox-on-actor + // #require-mailbox-on-actor def receive = { case _ => } - //#require-mailbox-on-actor + // #require-mailbox-on-actor // ... 
} - //#require-mailbox-on-actor + // #require-mailbox-on-actor } class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { @@ -291,19 +291,19 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining dispatcher in config" in { val context = system - //#defining-dispatcher-in-config + // #defining-dispatcher-in-config import akka.actor.Props val myActor = context.actorOf(Props[MyActor](), "myactor") - //#defining-dispatcher-in-config + // #defining-dispatcher-in-config } "defining dispatcher in code" in { val context = system - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code import akka.actor.Props val myActor = context.actorOf(Props[MyActor]().withDispatcher("my-dispatcher"), "myactor1") - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code } "defining dispatcher with bounded queue" in { @@ -312,49 +312,49 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining fixed-pool-size dispatcher" in { val context = system - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher val myActor = context.actorOf(Props[MyActor]().withDispatcher("blocking-io-dispatcher"), "myactor2") - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher } "defining pinned dispatcher" in { val context = system - //#defining-pinned-dispatcher + // #defining-pinned-dispatcher val myActor = context.actorOf(Props[MyActor]().withDispatcher("my-pinned-dispatcher"), "myactor3") - //#defining-pinned-dispatcher + // #defining-pinned-dispatcher } "defining affinity-pool dispatcher" in { val context = system - //#defining-affinity-pool-dispatcher + // #defining-affinity-pool-dispatcher val myActor = context.actorOf(Props[MyActor]().withDispatcher("affinity-pool-dispatcher"), "myactor4") - //#defining-affinity-pool-dispatcher + // #defining-affinity-pool-dispatcher } "looking up a dispatcher" in { - //#lookup + // #lookup // for use with Futures, Scheduler, etc. 
implicit val executionContext = system.dispatchers.lookup("my-dispatcher") - //#lookup + // #lookup } "defining mailbox in config" in { val context = system - //#defining-mailbox-in-config + // #defining-mailbox-in-config import akka.actor.Props val myActor = context.actorOf(Props[MyActor](), "priomailboxactor") - //#defining-mailbox-in-config + // #defining-mailbox-in-config } "defining mailbox in code" in { val context = system - //#defining-mailbox-in-code + // #defining-mailbox-in-code import akka.actor.Props val myActor = context.actorOf(Props[MyActor]().withMailbox("prio-mailbox")) - //#defining-mailbox-in-code + // #defining-mailbox-in-code } "using a required mailbox" in { @@ -364,7 +364,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining priority dispatcher" in { new AnyRef { - //#prio-dispatcher + // #prio-dispatcher // We create a new Actor that just prints out what it processes class Logger extends Actor { @@ -396,7 +396,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { * lowpriority * lowpriority */ - //#prio-dispatcher + // #prio-dispatcher watch(a) expectMsgPF() { case Terminated(`a`) => () } @@ -405,7 +405,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining control aware dispatcher" in { new AnyRef { - //#control-aware-dispatcher + // #control-aware-dispatcher // We create a new Actor that just prints out what it processes class Logger extends Actor { @@ -428,7 +428,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { * foo * bar */ - //#control-aware-dispatcher + // #control-aware-dispatcher watch(a) expectMsgPF() { case Terminated(`a`) => () } diff --git a/akka-docs/src/test/scala/docs/duration/Sample.scala b/akka-docs/src/test/scala/docs/duration/Sample.scala index 37bad26d1a..830d3fd28b 100644 --- a/akka-docs/src/test/scala/docs/duration/Sample.scala +++ b/akka-docs/src/test/scala/docs/duration/Sample.scala @@ -7,7 +7,7 @@ package docs.duration import language.postfixOps object Scala { - //#dsl + // #dsl import scala.concurrent.duration._ val fivesec = 5.seconds @@ -16,11 +16,11 @@ object Scala { assert(diff < fivesec) val fourmillis = threemillis * 4 / 3 // you cannot write it the other way around val n = threemillis / (1 millisecond) - //#dsl + // #dsl - //#deadline + // #deadline val deadline = 10.seconds.fromNow // do something val rest = deadline.timeLeft - //#deadline + // #deadline } diff --git a/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala b/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala index 2b684798cf..edf1e2a806 100644 --- a/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala +++ b/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala @@ -11,7 +11,7 @@ import akka.testkit.TestProbe object EventBusDocSpec { - //#lookup-bus + // #lookup-bus import akka.event.EventBus import akka.event.LookupClassification @@ -46,9 +46,9 @@ object EventBusDocSpec { } - //#lookup-bus + // #lookup-bus - //#subchannel-bus + // #subchannel-bus import akka.util.Subclassification class StartsWithSubclassification extends Subclassification[String] { @@ -84,9 +84,9 @@ object EventBusDocSpec { subscriber ! event.payload } } - //#subchannel-bus + // #subchannel-bus - //#scanning-bus + // #scanning-bus import akka.event.ScanningClassification /** @@ -118,9 +118,9 @@ object EventBusDocSpec { subscriber ! 
event } } - //#scanning-bus + // #scanning-bus - //#actor-bus + // #actor-bus import akka.event.ActorEventBus import akka.event.ManagedActorClassification import akka.event.ActorClassifier @@ -140,7 +140,7 @@ object EventBusDocSpec { // used internally (i.e. the expected number of different classifiers) override protected def mapSize: Int = 128 } - //#actor-bus + // #actor-bus } @@ -148,17 +148,17 @@ class EventBusDocSpec extends AkkaSpec { import EventBusDocSpec._ "demonstrate LookupClassification" in { - //#lookup-bus-test + // #lookup-bus-test val lookupBus = new LookupBusImpl lookupBus.subscribe(testActor, "greetings") lookupBus.publish(MsgEnvelope("time", System.currentTimeMillis())) lookupBus.publish(MsgEnvelope("greetings", "hello")) expectMsg("hello") - //#lookup-bus-test + // #lookup-bus-test } "demonstrate SubchannelClassification" in { - //#subchannel-bus-test + // #subchannel-bus-test val subchannelBus = new SubchannelBusImpl subchannelBus.subscribe(testActor, "abc") subchannelBus.publish(MsgEnvelope("xyzabc", "x")) @@ -167,11 +167,11 @@ class EventBusDocSpec extends AkkaSpec { expectMsg("c") subchannelBus.publish(MsgEnvelope("abcdef", "d")) expectMsg("d") - //#subchannel-bus-test + // #subchannel-bus-test } "demonstrate ScanningClassification" in { - //#scanning-bus-test + // #scanning-bus-test val scanningBus = new ScanningBusImpl scanningBus.subscribe(testActor, 3) scanningBus.publish("xyzabc") @@ -179,11 +179,11 @@ class EventBusDocSpec extends AkkaSpec { expectMsg("ab") scanningBus.publish("abc") expectMsg("abc") - //#scanning-bus-test + // #scanning-bus-test } "demonstrate ManagedActorClassification" in { - //#actor-bus-test + // #actor-bus-test val observer1 = TestProbe().ref val observer2 = TestProbe().ref val probe1 = TestProbe() @@ -200,6 +200,6 @@ class EventBusDocSpec extends AkkaSpec { actorBus.publish(Notification(observer2, 101)) probe2.expectMsg(Notification(observer2, 101)) probe1.expectNoMessage(500.millis) - //#actor-bus-test + // #actor-bus-test } } diff --git a/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala b/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala index 89f0f4a7b4..59bf72b999 100644 --- a/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala @@ -9,7 +9,7 @@ import akka.testkit.AkkaSpec object LoggingDocSpec { - //#my-actor + // #my-actor import akka.event.Logging class MyActor extends Actor { @@ -25,7 +25,7 @@ object LoggingDocSpec { case x => log.warning("Received unknown message: {}", x) } } - //#my-actor + // #my-actor import akka.event.Logging @@ -34,7 +34,7 @@ object LoggingDocSpec { def receive = { case _ => { - //#mdc + // #mdc val mdc = Map("requestId" -> 1234, "visitorId" -> 5678) log.mdc(mdc) @@ -42,12 +42,12 @@ object LoggingDocSpec { log.info("Starting new request") log.clearMDC() - //#mdc + // #mdc } } } - //#mdc-actor + // #mdc-actor import Logging.MDC final case class Req(work: String, visitorId: Int) @@ -72,9 +72,9 @@ object LoggingDocSpec { } } - //#mdc-actor + // #mdc-actor - //#my-event-listener + // #my-event-listener import akka.event.Logging.Debug import akka.event.Logging.Error import akka.event.Logging.Info @@ -91,9 +91,9 @@ object LoggingDocSpec { case Debug(logSource, logClass, message) => // ... 
} } - //#my-event-listener + // #my-event-listener - //#my-source + // #my-source import akka.actor.ActorSystem import akka.event.LogSource @@ -110,12 +110,12 @@ object LoggingDocSpec { val log = Logging(system, this) } - //#my-source + // #my-source object Listeners { def println(s: Any) = () - //#deadletters + // #deadletters import akka.actor.{ Actor, DeadLetter, Props } class DeadLetterListener extends Actor { @@ -123,9 +123,9 @@ object LoggingDocSpec { case d: DeadLetter => println(d) } } - //#deadletters + // #deadletters - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream abstract class AllKindsOfMusic { def artist: String } case class Jazz(artist: String) extends AllKindsOfMusic case class Electronic(artist: String) extends AllKindsOfMusic @@ -136,7 +136,7 @@ object LoggingDocSpec { case m: Electronic => println(s"${self.path.name} is listening to: ${m.artist}") } } - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream } } @@ -162,16 +162,16 @@ class LoggingDocSpec extends AkkaSpec { "allow registration to dead letters" in { import LoggingDocSpec.Listeners._ - //#deadletters + // #deadletters val listener = system.actorOf(Props[DeadLetterListener]()) system.eventStream.subscribe(listener, classOf[DeadLetter]) - //#deadletters + // #deadletters } "demonstrate superclass subscriptions on eventStream" in { import LoggingDocSpec.Listeners._ - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream val jazzListener = system.actorOf(Props[Listener]()) val musicListener = system.actorOf(Props[Listener]()) @@ -183,29 +183,29 @@ class LoggingDocSpec extends AkkaSpec { // jazzListener and musicListener will be notified about Jazz: system.eventStream.publish(Jazz("Sonny Rollins")) - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream } "allow registration to suppressed dead letters" in { import akka.actor.Props val listener = system.actorOf(Props[MyActor]()) - //#suppressed-deadletters + // #suppressed-deadletters import akka.actor.SuppressedDeadLetter system.eventStream.subscribe(listener, classOf[SuppressedDeadLetter]) - //#suppressed-deadletters + // #suppressed-deadletters - //#all-deadletters + // #all-deadletters import akka.actor.AllDeadLetters system.eventStream.subscribe(listener, classOf[AllDeadLetters]) - //#all-deadletters + // #all-deadletters } "demonstrate logging more arguments" in { - //#array + // #array val args = Array("The", "brown", "fox", "jumps", 42) system.log.debug("five parameters: {}, {}, {}, {}, {}", args) - //#array + // #array } } diff --git a/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala b/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala index 21e2027f47..414a2e2e7f 100644 --- a/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala @@ -14,11 +14,11 @@ import akka.testkit.AkkaSpec import akka.actor.Extension class CountExtensionImpl extends Extension { - //Since this Extension is a shared instance + // Since this Extension is a shared instance // per ActorSystem we need to be threadsafe private val counter = new AtomicLong(0) - //This is the operation this Extension provides + // This is the operation this Extension provides def increment() = counter.incrementAndGet() } //#extension @@ -30,13 +30,13 @@ import akka.actor.ExtensionIdProvider import akka.actor.ExtendedActorSystem object CountExtension extends ExtensionId[CountExtensionImpl] with 
ExtensionIdProvider { - //The lookup method is required by ExtensionIdProvider, + // The lookup method is required by ExtensionIdProvider, // so we return ourselves here, this allows us // to configure our extension to be loaded when // the ActorSystem starts up override def lookup = CountExtension - //This method will be called by Akka + // This method will be called by Akka // to instantiate our Extension override def createExtension(system: ExtendedActorSystem) = new CountExtensionImpl @@ -58,7 +58,7 @@ object ExtensionDocSpec { //#config """ - //#extension-usage-actor + // #extension-usage-actor class MyActor extends Actor { def receive = { @@ -66,9 +66,9 @@ object ExtensionDocSpec { CountExtension(context.system).increment() } } - //#extension-usage-actor + // #extension-usage-actor - //#extension-usage-actor-trait + // #extension-usage-actor-trait trait Counting { self: Actor => def increment() = CountExtension(context.system).increment() @@ -78,21 +78,21 @@ object ExtensionDocSpec { case someMessage => increment() } } - //#extension-usage-actor-trait + // #extension-usage-actor-trait } class ExtensionDocSpec extends AkkaSpec(ExtensionDocSpec.config) { "demonstrate how to create an extension in Scala" in { - //#extension-usage + // #extension-usage CountExtension(system).increment() - //#extension-usage + // #extension-usage } "demonstrate how to lookup a configured extension in Scala" in { - //#extension-lookup + // #extension-lookup system.extension(CountExtension) - //#extension-lookup + // #extension-lookup } } diff --git a/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala index 1e823212a6..a141bc673a 100644 --- a/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala @@ -61,13 +61,13 @@ object SettingsExtensionDocSpec { //#config """ - //#extension-usage-actor + // #extension-usage-actor class MyActor extends Actor { val settings = Settings(context.system) val connection = connect(settings.DbUri, settings.CircuitBreakerTimeout) - //#extension-usage-actor + // #extension-usage-actor def receive = { case someMessage => } @@ -82,10 +82,10 @@ object SettingsExtensionDocSpec { class SettingsExtensionDocSpec extends AkkaSpec(SettingsExtensionDocSpec.config) { "demonstrate how to create application specific settings extension in Scala" in { - //#extension-usage + // #extension-usage val dbUri = Settings(system).DbUri val circuitBreakerTimeout = Settings(system).CircuitBreakerTimeout - //#extension-usage + // #extension-usage } } diff --git a/akka-docs/src/test/scala/docs/faq/Faq.scala b/akka-docs/src/test/scala/docs/faq/Faq.scala index ede7e30d8b..7f87969ba8 100644 --- a/akka-docs/src/test/scala/docs/faq/Faq.scala +++ b/akka-docs/src/test/scala/docs/faq/Faq.scala @@ -26,9 +26,9 @@ class MyActor extends Actor { case BarMessage(bar) => sender() ! BazMessage("Got " + bar) // warning here: // "match may not be exhaustive. 
It would fail on the following input: FooMessage(_)" - //#exhaustiveness-check + // #exhaustiveness-check case FooMessage(_) => // avoid the warning in our build logs - //#exhaustiveness-check + // #exhaustiveness-check } } } diff --git a/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala b/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala index 0f30fa38f8..a314f80835 100644 --- a/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala +++ b/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala @@ -38,7 +38,7 @@ object FutureDocSpec { } } - //#pipe-to-usage + // #pipe-to-usage class ActorUsingPipeTo(target: ActorRef) extends Actor { // akka.pattern.pipe needs to be imported import akka.pattern.{ ask, pipe } @@ -52,18 +52,18 @@ object FutureDocSpec { future.pipeTo(sender()) // use the pipe pattern } } - //#pipe-to-usage + // #pipe-to-usage - //#pipe-to-returned-data + // #pipe-to-returned-data case class UserData(data: String) case class UserActivity(activity: String) - //#pipe-to-returned-data + // #pipe-to-returned-data - //#pipe-to-user-data-actor + // #pipe-to-user-data-actor class UserDataActor extends Actor { import UserDataActor._ - //holds the user data internally + // holds the user data internally var internalData: UserData = UserData("initial data") def receive = { @@ -75,9 +75,9 @@ object FutureDocSpec { object UserDataActor { case object Get } - //#pipe-to-user-data-actor + // #pipe-to-user-data-actor - //#pipe-to-user-activity-actor + // #pipe-to-user-activity-actor trait UserActivityRepository { def queryHistoricalActivities(userId: String): Future[List[UserActivity]] } @@ -98,9 +98,9 @@ object FutureDocSpec { object UserActivityActor { case object Get } - //#pipe-to-user-activity-actor + // #pipe-to-user-activity-actor - //#pipe-to-proxy-actor + // #pipe-to-proxy-actor class UserProxyActor(userData: ActorRef, userActivities: ActorRef) extends Actor { import UserProxyActor._ import akka.pattern.{ ask, pipe } @@ -115,15 +115,15 @@ object FutureDocSpec { (userActivities ? UserActivityActor.Get).pipeTo(sender()) } } - //#pipe-to-proxy-actor + // #pipe-to-proxy-actor - //#pipe-to-proxy-messages + // #pipe-to-proxy-messages object UserProxyActor { sealed trait Message case object GetUserData extends Message case object GetUserActivities extends Message } - //#pipe-to-proxy-messages + // #pipe-to-proxy-messages } @@ -135,7 +135,7 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage custom ExecutionContext" in { val yourExecutorServiceGoesHere = java.util.concurrent.Executors.newSingleThreadExecutor() - //#diy-execution-context + // #diy-execution-context import scala.concurrent.{ ExecutionContext, Promise } implicit val ec = ExecutionContext.fromExecutorService(yourExecutorServiceGoesHere) @@ -146,13 +146,13 @@ class FutureDocSpec extends AkkaSpec { // Then shut your ExecutionContext down at some // appropriate place in your program/application ec.shutdown() - //#diy-execution-context + // #diy-execution-context } "demonstrate usage of blocking from actor" in { val actor = system.actorOf(Props[MyActor]()) val msg = "hello" - //#ask-blocking + // #ask-blocking import scala.concurrent.Await import akka.pattern.ask import akka.util.Timeout @@ -161,7 +161,7 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds val future = actor ? 
msg // enabled by the “ask” import val result = Await.result(future, timeout.duration).asInstanceOf[String] - //#ask-blocking + // #ask-blocking result should be("HELLO") } @@ -170,17 +170,17 @@ class FutureDocSpec extends AkkaSpec { val actor = system.actorOf(Props[MyActor]()) val msg = "hello" implicit val timeout: Timeout = 5.seconds - //#map-to + // #map-to import scala.concurrent.Future import akka.pattern.ask val future: Future[String] = ask(actor, msg).mapTo[String] - //#map-to + // #map-to Await.result(future, timeout.duration) should be("HELLO") } "demonstrate usage of simple future eval" in { - //#future-eval + // #future-eval import scala.concurrent.Await import scala.concurrent.Future import scala.concurrent.duration._ @@ -189,12 +189,12 @@ class FutureDocSpec extends AkkaSpec { "Hello" + "World" } future.foreach(println) - //#future-eval + // #future-eval Await.result(future, 3 seconds) should be("HelloWorld") } "demonstrate usage of map" in { - //#map + // #map val f1 = Future { "Hello" + "World" } @@ -202,14 +202,14 @@ class FutureDocSpec extends AkkaSpec { x.length } f2.foreach(println) - //#map + // #map val result = Await.result(f2, 3 seconds) result should be(10) f1.value should be(Some(Success("HelloWorld"))) } "demonstrate wrong usage of nested map" in { - //#wrong-nested-map + // #wrong-nested-map val f1 = Future { "Hello" + "World" } @@ -220,12 +220,12 @@ class FutureDocSpec extends AkkaSpec { } } f3.foreach(println) - //#wrong-nested-map + // #wrong-nested-map Await.ready(f3, 3 seconds) } "demonstrate usage of flatMap" in { - //#flat-map + // #flat-map val f1 = Future { "Hello" + "World" } @@ -236,13 +236,13 @@ class FutureDocSpec extends AkkaSpec { } } f3.foreach(println) - //#flat-map + // #flat-map val result = Await.result(f3, 3 seconds) result should be(30) } "demonstrate usage of filter" in { - //#filter + // #filter val future1 = Future.successful(4) val future2 = future1.filter(_ % 2 == 0) @@ -254,15 +254,15 @@ class FutureDocSpec extends AkkaSpec { } failedFilter.foreach(println) - //#filter + // #filter val result = Await.result(future2, 3 seconds) result should be(4) val result2 = Await.result(failedFilter, 3 seconds) - result2 should be(0) //Can only be 0 when there was a MatchError + result2 should be(0) // Can only be 0 when there was a MatchError } "demonstrate usage of for comprehension" in { - //#for-comprehension + // #for-comprehension val f = for { a <- Future(10 / 2) // 10 / 2 = 5 b <- Future(a + 1) // 5 + 1 = 6 @@ -274,7 +274,7 @@ class FutureDocSpec extends AkkaSpec { // are not done in parallel. 
f.foreach(println) - //#for-comprehension + // #for-comprehension val result = Await.result(f, 3 seconds) result should be(24) } @@ -288,7 +288,7 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds import scala.concurrent.Await import akka.pattern.ask - //#composing-wrong + // #composing-wrong val f1 = ask(actor1, msg1) val f2 = ask(actor2, msg2) @@ -296,10 +296,10 @@ class FutureDocSpec extends AkkaSpec { val a = Await.result(f1, 3 seconds).asInstanceOf[Int] val b = Await.result(f2, 3 seconds).asInstanceOf[Int] - val f3 = ask(actor3, (a + b)) + val f3 = ask(actor3, a + b) val result = Await.result(f3, 3 seconds).asInstanceOf[Int] - //#composing-wrong + // #composing-wrong result should be(3) } @@ -312,7 +312,7 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds import scala.concurrent.Await import akka.pattern.ask - //#composing + // #composing val f1 = ask(actor1, msg1) val f2 = ask(actor2, msg2) @@ -320,11 +320,11 @@ class FutureDocSpec extends AkkaSpec { val f3 = for { a <- f1.mapTo[Int] b <- f2.mapTo[Int] - c <- ask(actor3, (a + b)).mapTo[Int] + c <- ask(actor3, a + b).mapTo[Int] } yield c f3.foreach(println) - //#composing + // #composing val result = Await.result(f3, 3 seconds).asInstanceOf[Int] result should be(3) } @@ -332,7 +332,7 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of sequence with actors" in { implicit val timeout: Timeout = 5.seconds val oddActor = system.actorOf(Props[OddActor]()) - //#sequence-ask + // #sequence-ask // oddActor returns odd numbers sequentially from 1 as a List[Future[Int]] val listOfFutures = List.fill(100)(akka.pattern.ask(oddActor, GetNext).mapTo[Int]) @@ -342,45 +342,45 @@ class FutureDocSpec extends AkkaSpec { // Find the sum of the odd numbers val oddSum = futureList.map(_.sum) oddSum.foreach(println) - //#sequence-ask + // #sequence-ask Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } "demonstrate usage of sequence" in { - //#sequence + // #sequence val futureList = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))) val oddSum = futureList.map(_.sum) oddSum.foreach(println) - //#sequence + // #sequence Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } "demonstrate usage of traverse" in { - //#traverse + // #traverse val futureList = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1)) val oddSum = futureList.map(_.sum) oddSum.foreach(println) - //#traverse + // #traverse Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } "demonstrate usage of fold" in { - //#fold + // #fold // Create a sequence of Futures val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.foldLeft(futures)(0)(_ + _) futureSum.foreach(println) - //#fold + // #fold Await.result(futureSum, 3 seconds) should be(1001000) } "demonstrate usage of reduce" in { - //#reduce + // #reduce // Create a sequence of Futures val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.reduceLeft(futures)(_ + _) futureSum.foreach(println) - //#reduce + // #reduce Await.result(futureSum, 3 seconds) should be(1001000) } @@ -388,12 +388,12 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds val actor = system.actorOf(Props[MyActor]()) val msg1 = -1 - //#recover + // #recover val future = akka.pattern.ask(actor, msg1).recover { case e: ArithmeticException => 0 } future.foreach(println) - //#recover + // #recover Await.result(future, 3.seconds) should be(0) } @@ -401,24 
+401,24 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds val actor = system.actorOf(Props[MyActor]()) val msg1 = -1 - //#try-recover + // #try-recover val future = akka.pattern.ask(actor, msg1).recoverWith { case e: ArithmeticException => Future.successful(0) case foo: IllegalArgumentException => Future.failed[Int](new IllegalStateException("All br0ken!")) } future.foreach(println) - //#try-recover + // #try-recover Await.result(future, 3 seconds) should be(0) } "demonstrate usage of zip" in { val future1 = Future { "foo" } val future2 = Future { "bar" } - //#zip + // #zip val future3 = future1.zip(future2).map { case (a, b) => a + " " + b } future3.foreach(println) - //#zip + // #zip Await.result(future3, 3 seconds) should be("foo bar") } @@ -427,7 +427,7 @@ class FutureDocSpec extends AkkaSpec { val url = "foo bar" def log(cause: Throwable) = () def watchSomeTV(): Unit = () - //#and-then + // #and-then val result = Future { loadPage(url) } .andThen { case Failure(exception) => log(exception) @@ -436,7 +436,7 @@ class FutureDocSpec extends AkkaSpec { case _ => watchSomeTV() } result.foreach(println) - //#and-then + // #and-then Await.result(result, 3 seconds) should be("foo bar") } @@ -444,10 +444,10 @@ class FutureDocSpec extends AkkaSpec { val future1 = Future { "foo" } val future2 = Future { "bar" } val future3 = Future { "pigdog" } - //#fallback-to + // #fallback-to val future4 = future1.fallbackTo(future2).fallbackTo(future3) future4.foreach(println) - //#fallback-to + // #fallback-to Await.result(future4, 3 seconds) should be("foo") } @@ -455,27 +455,27 @@ class FutureDocSpec extends AkkaSpec { val future = Future { "foo" } def doSomethingOnSuccess(r: String) = () def doSomethingOnFailure(t: Throwable) = () - //#onComplete + // #onComplete future.onComplete { case Success(result) => doSomethingOnSuccess(result) case Failure(failure) => doSomethingOnFailure(failure) } - //#onComplete + // #onComplete Await.result(future, 3 seconds) should be("foo") } "demonstrate usage of Future.successful & Future.failed & Future.promise" in { - //#successful + // #successful val future = Future.successful("Yay!") - //#successful - //#failed + // #successful + // #failed val otherFuture = Future.failed[String](new IllegalArgumentException("Bang!")) - //#failed - //#promise + // #failed + // #promise val promise = Promise[String]() val theFuture = promise.future promise.success("hello") - //#promise + // #promise Await.result(future, 3 seconds) should be("Yay!") intercept[IllegalArgumentException] { Await.result(otherFuture, 3 seconds) } Await.result(theFuture, 3 seconds) should be("hello") @@ -484,25 +484,25 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of pattern.after" in { import akka.actor.typed.scaladsl.adapter.ClassicActorSystemOps implicit val system: ActorSystem[Nothing] = this.system.toTyped - //#after + // #after val delayed = akka.pattern.after(200.millis)(Future.failed(new IllegalStateException("OHNOES"))) val future = Future { Thread.sleep(1000); "foo" } val result = Future.firstCompletedOf(Seq(future, delayed)) - //#after + // #after intercept[IllegalStateException] { Await.result(result, 2 second) } } "demonstrate pattern.retry" in { import akka.actor.typed.scaladsl.adapter.ClassicActorSystemOps val system: ActorSystem[Nothing] = this.system.toTyped - //#retry + // #retry import akka.actor.typed.scaladsl.adapter._ implicit val scheduler: akka.actor.Scheduler = system.scheduler.toClassic implicit val ec: ExecutionContext = 
system.executionContext - //Given some future that will succeed eventually + // Given some future that will succeed eventually @volatile var failCount = 0 def futureToAttempt() = { if (failCount < 5) { @@ -511,25 +511,25 @@ class FutureDocSpec extends AkkaSpec { } else Future.successful(5) } - //Return a new future that will retry up to 10 times + // Return a new future that will retry up to 10 times val retried: Future[Int] = akka.pattern.retry(() => futureToAttempt(), attempts = 10, 100 milliseconds) - //#retry + // #retry Await.result(retried, 1 second) should ===(5) } "demonstrate context.dispatcher" in { - //#context-dispatcher + // #context-dispatcher class A extends Actor { import context.dispatcher val f = Future("hello") def receive = { - //#receive-omitted + // #receive-omitted case _ => - //#receive-omitted + // #receive-omitted } } - //#context-dispatcher + // #context-dispatcher } } diff --git a/akka-docs/src/test/scala/docs/io/EchoServer.scala b/akka-docs/src/test/scala/docs/io/EchoServer.scala index 5c09cab520..0043ec2227 100644 --- a/akka-docs/src/test/scala/docs/io/EchoServer.scala +++ b/akka-docs/src/test/scala/docs/io/EchoServer.scala @@ -50,12 +50,12 @@ class EchoManager(handlerClass: Class[_]) extends Actor with ActorLogging { log.warning(s"cannot bind to [$local]") context.stop(self) - //#echo-manager + // #echo-manager case Connected(remote, local) => log.info("received connection from {}", remote) val handler = context.actorOf(Props(handlerClass, sender(), remote)) sender() ! Register(handler, keepOpenOnPeerClosed = true) - //#echo-manager + // #echo-manager } } @@ -79,7 +79,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor // start out in optimistic write-through mode def receive = writing - //#writing + // #writing def writing: Receive = { case Received(data) => connection ! Write(data, Ack(currentOffset)) @@ -96,9 +96,9 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor if (storage.isEmpty) context.stop(self) else context.become(closing) } - //#writing + // #writing - //#buffering + // #buffering def buffering(nack: Int): Receive = { var toAck = 10 var peerClosed = false @@ -124,33 +124,33 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor else context.become(writing) } } - //#buffering + // #buffering - //#closing + // #closing def closing: Receive = { case CommandFailed(_: Write) => connection ! ResumeWriting context.become({ - case WritingResumed => - writeAll() - context.unbecome() + case WritingResumed => + writeAll() + context.unbecome() - case ack: Int => acknowledge(ack) + case ack: Int => acknowledge(ack) - }, discardOld = false) + }, discardOld = false) case Ack(ack) => acknowledge(ack) if (storage.isEmpty) context.stop(self) } - //#closing + // #closing override def postStop(): Unit = { log.info(s"transferred $transferred bytes from/to [$remote]") } - //#storage-omitted + // #storage-omitted private var storageOffset = 0 private var storage = Vector.empty[ByteString] private var stored = 0L @@ -163,7 +163,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor private def currentOffset = storageOffset + storage.size - //#helpers + // #helpers private def buffer(data: ByteString): Unit = { storage :+= data stored += data.size @@ -196,7 +196,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor suspended = false } } - //#helpers + // #helpers private def writeFirst(): Unit = { connection ! 
Write(storage(0), Ack(storageOffset)) @@ -208,7 +208,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor } } - //#storage-omitted + // #storage-omitted } //#echo-handler @@ -228,15 +228,15 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) extends connection ! Write(data, Ack) context.become({ - case Received(data) => buffer(data) - case Ack => acknowledge() - case PeerClosed => closing = true - }, discardOld = false) + case Received(data) => buffer(data) + case Ack => acknowledge() + case PeerClosed => closing = true + }, discardOld = false) case PeerClosed => context.stop(self) } - //#storage-omitted + // #storage-omitted override def postStop(): Unit = { log.info(s"transferred $transferred bytes from/to [$remote]") } @@ -251,7 +251,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) extends val lowWatermark = maxStored * 3 / 10 var suspended = false - //#simple-helpers + // #simple-helpers private def buffer(data: ByteString): Unit = { storage :+= data stored += data.size @@ -287,7 +287,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) extends else context.unbecome() } else connection ! Write(storage(0), Ack) } - //#simple-helpers - //#storage-omitted + // #simple-helpers + // #storage-omitted } //#simple-echo-handler diff --git a/akka-docs/src/test/scala/docs/io/IODocSpec.scala b/akka-docs/src/test/scala/docs/io/IODocSpec.scala index 5ad85e9fd8..96edbad732 100644 --- a/akka-docs/src/test/scala/docs/io/IODocSpec.scala +++ b/akka-docs/src/test/scala/docs/io/IODocSpec.scala @@ -15,12 +15,12 @@ import akka.testkit.AkkaSpec import scala.concurrent.duration._ class DemoActor extends Actor { - //#manager + // #manager import akka.io.{ IO, Tcp } import context.system // implicitly used by IO(Tcp) val manager = IO(Tcp) - //#manager + // #manager def receive = Actor.emptyBehavior } @@ -35,16 +35,16 @@ class Server extends Actor { def receive = { case b @ Bound(localAddress) => - //#do-some-logging-or-setup + // #do-some-logging-or-setup context.parent ! b - //#do-some-logging-or-setup + // #do-some-logging-or-setup case CommandFailed(_: Bind) => context.stop(self) case c @ Connected(remote, local) => - //#server + // #server context.parent ! c - //#server + // #server val handler = context.actorOf(Props[SimplisticHandler]()) val connection = sender() connection ! Register(handler) diff --git a/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala b/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala index 592d50fdf8..519c7bd492 100644 --- a/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala +++ b/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala @@ -21,28 +21,28 @@ object PullReadingExample { import context.system override def preStart(): Unit = - //#pull-mode-bind + // #pull-mode-bind IO(Tcp) ! Bind(self, new InetSocketAddress("localhost", 0), pullMode = true) - //#pull-mode-bind + // #pull-mode-bind def receive = { - //#pull-accepting + // #pull-accepting case Bound(localAddress) => // Accept connections one by one sender() ! ResumeAccepting(batchSize = 1) context.become(listening(sender())) - //#pull-accepting + // #pull-accepting monitor ! localAddress } - //#pull-accepting-cont + // #pull-accepting-cont def listening(listener: ActorRef): Receive = { case Connected(remote, local) => val handler = context.actorOf(Props(classOf[PullEcho], sender())) sender() ! Register(handler, keepOpenOnPeerClosed = true) listener ! 
ResumeAccepting(batchSize = 1) } - //#pull-accepting-cont + // #pull-accepting-cont } @@ -50,14 +50,14 @@ object PullReadingExample { class PullEcho(connection: ActorRef) extends Actor { - //#pull-reading-echo + // #pull-reading-echo override def preStart(): Unit = connection ! ResumeReading def receive = { case Received(data) => connection ! Write(data, Ack) case Ack => connection ! ResumeReading } - //#pull-reading-echo + // #pull-reading-echo } } @@ -69,9 +69,9 @@ class PullReadingSpec extends AkkaSpec with ImplicitSender { system.actorOf(Props(classOf[PullReadingExample.Listener], probe.ref), "server") val listenAddress = probe.expectMsgType[InetSocketAddress] - //#pull-mode-connect + // #pull-mode-connect IO(Tcp) ! Connect(listenAddress, pullMode = true) - //#pull-mode-connect + // #pull-mode-connect expectMsgType[Connected] val connection = lastSender diff --git a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala index 724b9d3128..196bd58c98 100644 --- a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala +++ b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala @@ -31,16 +31,16 @@ final case class MulticastGroup(address: String, interface: String) extends Sock //#multicast-group class Listener(iface: String, group: String, port: Int, sink: ActorRef) extends Actor with ActorLogging { - //#bind + // #bind import context.system val opts = List(Inet6ProtocolFamily(), MulticastGroup(group, iface)) IO(Udp) ! Udp.Bind(self, new InetSocketAddress(port), opts) - //#bind + // #bind def receive = { case b @ Udp.Bound(to) => log.info("Bound to {}", to) - sink ! (b) + sink ! b case Udp.Received(data, remote) => val msg = data.decodeString("utf-8") log.info("Received '{}' from {}", msg, remote) diff --git a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala index 436bbf0624..f292985e62 100644 --- a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala +++ b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala @@ -28,11 +28,10 @@ class ScalaUdpMulticastSpec "listener" should { "send message back to sink" in { val ipv6ifaces = - NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter( - iface => - iface.supportsMulticast && - iface.isUp && - iface.getInetAddresses.asScala.exists(_.isInstanceOf[Inet6Address])) + NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter(iface => + iface.supportsMulticast && + iface.isUp && + iface.getInetAddresses.asScala.exists(_.isInstanceOf[Inet6Address])) if (ipv6ifaces.isEmpty) { // IPv6 not supported for any interface on this platform diff --git a/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala b/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala index 4b371c8716..4fea5eb164 100644 --- a/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala +++ b/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala @@ -19,7 +19,7 @@ import akka.io.UdpConnected object ScalaUdpDocSpec { - //#sender + // #sender class SimpleSender(remote: InetSocketAddress) extends Actor { import context.system IO(Udp) ! Udp.SimpleSender @@ -27,49 +27,49 @@ object ScalaUdpDocSpec { def receive = { case Udp.SimpleSenderReady => context.become(ready(sender())) - //#sender + // #sender sender() ! Udp.Send(ByteString("hello"), remote) - //#sender + // #sender } def ready(send: ActorRef): Receive = { case msg: String => send ! Udp.Send(ByteString(msg), remote) - //#sender + // #sender if (msg == "world") send ! 
PoisonPill - //#sender + // #sender } } - //#sender + // #sender - //#listener + // #listener class Listener(nextActor: ActorRef) extends Actor { import context.system IO(Udp) ! Udp.Bind(self, new InetSocketAddress("localhost", 0)) def receive = { case Udp.Bound(local) => - //#listener + // #listener nextActor.forward(local) - //#listener + // #listener context.become(ready(sender())) } def ready(socket: ActorRef): Receive = { case Udp.Received(data, remote) => val processed = // parse data etc., e.g. using PipelineStage - //#listener + // #listener data.utf8String - //#listener + // #listener socket ! Udp.Send(data, remote) // example server echoes back nextActor ! processed case Udp.Unbind => socket ! Udp.Unbind case Udp.Unbound => context.stop(self) } } - //#listener + // #listener - //#connected + // #connected class Connected(remote: InetSocketAddress) extends Actor { import context.system IO(UdpConnected) ! UdpConnected.Connect(self, remote) @@ -77,18 +77,18 @@ object ScalaUdpDocSpec { def receive = { case UdpConnected.Connected => context.become(ready(sender())) - //#connected + // #connected sender() ! UdpConnected.Send(ByteString("hello")) - //#connected + // #connected } def ready(connection: ActorRef): Receive = { case UdpConnected.Received(data) => // process data, send it on, etc. - //#connected + // #connected if (data.utf8String == "hello") connection ! UdpConnected.Send(ByteString("world")) - //#connected + // #connected case msg: String => connection ! UdpConnected.Send(ByteString(msg)) case UdpConnected.Disconnect => @@ -96,7 +96,7 @@ object ScalaUdpDocSpec { case UdpConnected.Disconnected => context.stop(self) } } - //#connected + // #connected } diff --git a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala index 2041c346c6..9941667de2 100644 --- a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala @@ -15,7 +15,7 @@ class BackoffSupervisorDocSpec { val system: ActorSystem = ??? import scala.concurrent.duration._ - //#backoff-stop + // #backoff-stop val childProps = Props(classOf[EchoActor]) val supervisor = BackoffSupervisor.props( @@ -28,14 +28,14 @@ class BackoffSupervisorDocSpec { )) system.actorOf(supervisor, name = "echoSupervisor") - //#backoff-stop + // #backoff-stop } class BackoffSupervisorDocSpecExampleFail { val system: ActorSystem = ??? 
import scala.concurrent.duration._ - //#backoff-fail + // #backoff-fail val childProps = Props(classOf[EchoActor]) val supervisor = BackoffSupervisor.props( @@ -48,7 +48,7 @@ class BackoffSupervisorDocSpec { )) system.actorOf(supervisor, name = "echoSupervisor") - //#backoff-fail + // #backoff-fail } class BackoffSupervisorDocSpecExampleStopOptions { @@ -57,7 +57,7 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) - //#backoff-custom-stop + // #backoff-custom-stop val supervisor = BackoffSupervisor.props( BackoffOpts .onStop( @@ -70,7 +70,7 @@ class BackoffSupervisorDocSpec { .withManualReset // the child must send BackoffSupervisor.Reset to its parent .withDefaultStoppingStrategy // Stop at any Exception thrown ) - //#backoff-custom-stop + // #backoff-custom-stop system.actorOf(supervisor, name = "echoSupervisor") } @@ -81,7 +81,7 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) - //#backoff-custom-fail + // #backoff-custom-fail val supervisor = BackoffSupervisor.props( BackoffOpts .onFailure( @@ -96,7 +96,7 @@ class BackoffSupervisorDocSpec { case _: MyException => SupervisorStrategy.Restart case _ => SupervisorStrategy.Escalate })) - //#backoff-custom-fail + // #backoff-custom-fail system.actorOf(supervisor, name = "echoSupervisor") } @@ -112,15 +112,15 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) - //#backoff-sharded + // #backoff-sharded val supervisor = BackoffSupervisor.props( BackoffOpts .onStop(childProps, childName = "myEcho", minBackoff = 3.seconds, maxBackoff = 30.seconds, randomFactor = 0.2) .withFinalStopMessage(_ == StopMessage)) - //#backoff-sharded + // #backoff-sharded - //#backoff-sharded-passivation + // #backoff-sharded-passivation context.parent ! Passivate(StopMessage) - //#backoff-sharded-passivation + // #backoff-sharded-passivation } } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala index 029e2bb22a..b5b197dd5b 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala @@ -29,57 +29,57 @@ object PersistenceDocSpec { object RecoverySample { trait MyPersistentActor1 extends PersistentActor { - //#recovery-disabled + // #recovery-disabled override def recovery = Recovery.none - //#recovery-disabled + // #recovery-disabled } trait MyPersistentActor2 extends PersistentActor { - //#recovery-custom + // #recovery-custom override def recovery = Recovery(toSequenceNr = 457L) - //#recovery-custom + // #recovery-custom } class MyPersistentActor4 extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - //#recovery-completed + // #recovery-completed override def receiveRecover: Receive = { case RecoveryCompleted => // perform init after recovery, before any other messages - //... - case evt => //... + // ... + case evt => // ... } override def receiveCommand: Receive = { - case msg => //... + case msg => // ... 
} - //#recovery-completed + // #recovery-completed } trait MyPersistentActor5 extends PersistentActor { - //#recovery-no-snap + // #recovery-no-snap override def recovery = Recovery(fromSnapshot = SnapshotSelectionCriteria.None) - //#recovery-no-snap + // #recovery-no-snap } } object PersistenceId { trait PersistentActorMethods { - //#persistence-id + // #persistence-id def persistenceId: String - //#persistence-id - //#recovery-status + // #persistence-id + // #recovery-status def recoveryRunning: Boolean def recoveryFinished: Boolean - //#recovery-status + // #recovery-status } class MyPersistentActor1 extends PersistentActor with PersistentActorMethods { - //#persistence-id-override + // #persistence-id-override override def persistenceId = "my-stable-persistence-id" - //#persistence-id-override + // #persistence-id-override override def receiveRecover: Receive = { case _ => @@ -93,18 +93,18 @@ object PersistenceDocSpec { object BackoffOnStop { abstract class MyActor extends Actor { import PersistAsync.MyPersistentActor - //#backoff + // #backoff val childProps = Props[MyPersistentActor]() val props = BackoffSupervisor.props(BackoffOpts .onStop(childProps, childName = "myActor", minBackoff = 3.seconds, maxBackoff = 30.seconds, randomFactor = 0.2)) context.actorOf(props, name = "mySupervisor") - //#backoff + // #backoff } } object AtLeastOnce { - //#at-least-once-example + // #at-least-once-example import akka.actor.{ Actor, ActorSelection } import akka.persistence.AtLeastOnceDelivery @@ -143,7 +143,7 @@ object PersistenceDocSpec { sender() ! Confirm(deliveryId) } } - //#at-least-once-example + // #at-least-once-example } object SaveSnapshot { @@ -153,7 +153,7 @@ object PersistenceDocSpec { def updateState(event: String): Unit = {} - //#save-snapshot + // #save-snapshot var state: Any = _ val snapShotInterval = 1000 @@ -167,7 +167,7 @@ object PersistenceDocSpec { saveSnapshot(state) } } - //#save-snapshot + // #save-snapshot override def receiveRecover: Receive = ??? } @@ -177,13 +177,13 @@ object PersistenceDocSpec { class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - //#snapshot-criteria + // #snapshot-criteria override def recovery = Recovery( fromSnapshot = SnapshotSelectionCriteria(maxSequenceNr = 457L, maxTimestamp = System.currentTimeMillis)) - //#snapshot-criteria + // #snapshot-criteria - //#snapshot-offer + // #snapshot-offer var state: Any = _ override def receiveRecover: Receive = { @@ -191,7 +191,7 @@ object PersistenceDocSpec { case RecoveryCompleted => case event => // ... } - //#snapshot-offer + // #snapshot-offer override def receiveCommand: Receive = ??? } @@ -200,7 +200,7 @@ object PersistenceDocSpec { object PersistAsync { - //#persist-async + // #persist-async class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" @@ -234,12 +234,12 @@ object PersistenceDocSpec { // evt-b-1 // evt-b-2 - //#persist-async + // #persist-async } object Defer { - //#defer + // #defer class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" @@ -263,9 +263,9 @@ object PersistenceDocSpec { } } } - //#defer + // #defer - //#defer-caller + // #defer-caller persistentActor ! "a" persistentActor ! 
"b" @@ -279,11 +279,11 @@ object PersistenceDocSpec { // evt-b-2 // evt-b-3 - //#defer-caller + // #defer-caller } object DeferWithPersist { - //#defer-with-persist + // #defer-with-persist class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" @@ -307,7 +307,7 @@ object PersistenceDocSpec { } } } - //#defer-with-persist + // #defer-with-persist } object NestedPersists { @@ -319,7 +319,7 @@ object PersistenceDocSpec { case _ => // handle recovery here } - //#nested-persist-persist + // #nested-persist-persist override def receiveCommand: Receive = { case c: String => sender() ! c @@ -338,10 +338,10 @@ object PersistenceDocSpec { } } } - //#nested-persist-persist + // #nested-persist-persist } - //#nested-persist-persist-caller + // #nested-persist-persist-caller persistentActor ! "a" persistentActor ! "b" @@ -358,7 +358,7 @@ object PersistenceDocSpec { // b-inner-1 // b-inner-2 - //#nested-persist-persist-caller + // #nested-persist-persist-caller class MyPersistAsyncActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" @@ -367,7 +367,7 @@ object PersistenceDocSpec { case _ => // handle recovery here } - //#nested-persistAsync-persistAsync + // #nested-persistAsync-persistAsync override def receiveCommand: Receive = { case c: String => sender() ! c @@ -384,10 +384,10 @@ object PersistenceDocSpec { } } } - //#nested-persistAsync-persistAsync + // #nested-persistAsync-persistAsync } - //#nested-persistAsync-persistAsync-caller + // #nested-persistAsync-persistAsync-caller persistentActor ! "a" persistentActor ! "b" @@ -407,12 +407,12 @@ object PersistenceDocSpec { // a -> a-outer-1 -> a-outer-2 -> a-inner-1 -> a-inner-2 // b -> b-outer-1 -> b-outer-2 -> b-inner-1 -> b-inner-2 - //#nested-persistAsync-persistAsync-caller + // #nested-persistAsync-persistAsync-caller } object AvoidPoisonPill { - //#safe-shutdown + // #safe-shutdown /** Explicit shutdown message */ case object Shutdown @@ -431,9 +431,9 @@ object PersistenceDocSpec { case _ => // handle recovery here } } - //#safe-shutdown + // #safe-shutdown - //#safe-shutdown-example-bad + // #safe-shutdown-example-bad // UN-SAFE, due to PersistentActor's command stashing: persistentActor ! "a" persistentActor ! "b" @@ -444,9 +444,9 @@ object PersistenceDocSpec { // PoisonPill is an AutoReceivedMessage, is handled automatically // !! stop !! // Actor is stopped without handling `b` nor the `a` handler! - //#safe-shutdown-example-bad + // #safe-shutdown-example-bad - //#safe-shutdown-example-good + // #safe-shutdown-example-good // SAFE: persistentActor ! "a" persistentActor ! 
"b" @@ -462,7 +462,7 @@ object PersistenceDocSpec { // # unstashing; internal-stash = [] // Shutdown // -- stop -- - //#safe-shutdown-example-good + // #safe-shutdown-example-good } } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala index 785c4c48f5..009512e044 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala @@ -17,12 +17,12 @@ object PersistenceMultiDocSpec { //#default-config """ - //#default-plugins + // #default-plugins trait ActorWithDefaultPlugins extends PersistentActor { override def persistenceId = "123" } - //#default-plugins + // #default-plugins val OverrideConfig = s""" @@ -44,7 +44,7 @@ object PersistenceMultiDocSpec { //#override-config """ - //#override-plugins + // #override-plugins trait ActorWithOverridePlugins extends PersistentActor { override def persistenceId = "123" @@ -55,9 +55,9 @@ object PersistenceMultiDocSpec { override def snapshotPluginId = "akka.persistence.chronicle.snapshot-store" } - //#override-plugins + // #override-plugins - //#runtime-config + // #runtime-config trait ActorWithRuntimePluginConfig extends PersistentActor with RuntimePluginConfig { // Variable that is retrieved at runtime, from an external service for instance. val runtimeDistinction = "foo" @@ -92,5 +92,5 @@ object PersistenceMultiDocSpec { } - //#runtime-config + // #runtime-config } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala index 5c3c0bc6ac..8ca429a3fe 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala @@ -125,7 +125,7 @@ object SharedLeveldbPluginDocSpec { //#event-adapter-config """ - //#shared-store-usage + // #shared-store-usage trait SharedStoreUsage extends Actor { override def preStart(): Unit = { context.actorSelection("akka://example@127.0.0.1:2552/user/store") ! Identify(1) @@ -136,7 +136,7 @@ object SharedLeveldbPluginDocSpec { SharedLeveldbJournal.setStore(store, context.system) } } - //#shared-store-usage + // #shared-store-usage } trait SharedLeveldbPluginDocSpec { @@ -144,22 +144,22 @@ trait SharedLeveldbPluginDocSpec { { import akka.actor._ - //#shared-store-creation + // #shared-store-creation import akka.persistence.journal.leveldb.SharedLeveldbStore val store = system.actorOf(Props[SharedLeveldbStore](), "store") - //#shared-store-creation + // #shared-store-creation } } class MyJournal extends AsyncWriteJournal { - //#sync-journal-plugin-api + // #sync-journal-plugin-api def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = Future.fromTry(Try { // blocking call here ??? }) - //#sync-journal-plugin-api + // #sync-journal-plugin-api def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = ??? 
def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( @@ -184,7 +184,7 @@ object PersistenceTCKDoc { object example1 { import akka.persistence.journal.JournalSpec - //#journal-tck-scala + // #journal-tck-scala class MyJournalSpec extends JournalSpec( config = ConfigFactory.parseString("""akka.persistence.journal.plugin = "my.journal.plugin"""")) { @@ -195,12 +195,12 @@ object PersistenceTCKDoc { override def supportsSerialization: CapabilityFlag = true // or CapabilityFlag.on } - //#journal-tck-scala + // #journal-tck-scala } object example2 { import akka.persistence.snapshot.SnapshotStoreSpec - //#snapshot-store-tck-scala + // #snapshot-store-tck-scala class MySnapshotStoreSpec extends SnapshotStoreSpec( config = ConfigFactory.parseString(""" @@ -210,7 +210,7 @@ object PersistenceTCKDoc { override def supportsSerialization: CapabilityFlag = true // or CapabilityFlag.on } - //#snapshot-store-tck-scala + // #snapshot-store-tck-scala } object example3 { import java.io.File @@ -218,7 +218,7 @@ object PersistenceTCKDoc { import akka.persistence.journal.JournalSpec import org.iq80.leveldb.util.FileUtils - //#journal-tck-before-after-scala + // #journal-tck-before-after-scala class MyJournalSpec extends JournalSpec(config = ConfigFactory.parseString(""" akka.persistence.journal.plugin = "my.journal.plugin" @@ -242,6 +242,6 @@ object PersistenceTCKDoc { } } - //#journal-tck-before-after-scala + // #journal-tck-before-after-scala } } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala index 5df7db1a11..7ec054e6fc 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala @@ -47,7 +47,7 @@ class PersistenceSchemaEvolutionDocSpec extends AnyWordSpec { class ProtobufReadOptional { - //#protobuf-read-optional-model + // #protobuf-read-optional-model sealed abstract class SeatType { def code: String } object SeatType { def fromString(s: String) = s match { @@ -64,9 +64,9 @@ class ProtobufReadOptional { } case class SeatReserved(letter: String, row: Int, seatType: SeatType) - //#protobuf-read-optional-model + // #protobuf-read-optional-model - //#protobuf-read-optional + // #protobuf-read-optional /** * Example serializer impl which uses protocol buffers generated classes (proto.*) * to perform the to/from binary marshalling. @@ -107,7 +107,7 @@ class ProtobufReadOptional { if (p.hasSeatType) SeatType.fromString(p.getSeatType) else SeatType.Unknown } - //#protobuf-read-optional + // #protobuf-read-optional } class ProtoBufRename { @@ -127,7 +127,7 @@ class ProtoBufRename { } class RenamePlainJson { - //#rename-plain-json + // #rename-plain-json class JsonRenamedFieldAdapter extends EventAdapter { val marshaller = new ExampleJsonMarshaller @@ -159,16 +159,16 @@ class RenamePlainJson { } } - //#rename-plain-json + // #rename-plain-json } object SimplestCustomSerializer { - //#simplest-custom-serializer-model + // #simplest-custom-serializer-model final case class Person(name: String, surname: String) - //#simplest-custom-serializer-model + // #simplest-custom-serializer-model - //#simplest-custom-serializer + // #simplest-custom-serializer /** * Simplest possible serializer, uses a string representation of the Person class. 
* @@ -208,7 +208,7 @@ object SimplestCustomSerializer { } - //#simplest-custom-serializer + // #simplest-custom-serializer } class PersonSerializerSettingsBox { diff --git a/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala index 43f8e2cad5..c7af2c2511 100644 --- a/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala @@ -11,7 +11,7 @@ import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal import akka.stream.scaladsl.Source object LeveldbPersistenceQueryDocSpec { - //#tagger + // #tagger import akka.persistence.journal.WriteEventAdapter import akka.persistence.journal.Tagged @@ -29,7 +29,7 @@ object LeveldbPersistenceQueryDocSpec { override def manifest(event: Any): String = "" } - //#tagger + // #tagger } class LeveldbPersistenceQueryDocSpec @@ -37,40 +37,40 @@ class LeveldbPersistenceQueryDocSpec "LeveldbPersistentQuery" must { "demonstrate how to get ReadJournal" in { - //#get-read-journal + // #get-read-journal import akka.persistence.query.PersistenceQuery import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) - //#get-read-journal + // #get-read-journal } "demonstrate EventsByPersistenceId" in { - //#EventsByPersistenceId + // #EventsByPersistenceId val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[EventEnvelope, NotUsed] = queries.eventsByPersistenceId("some-persistence-id", 0L, Long.MaxValue) val events: Source[Any, NotUsed] = src.map(_.event) - //#EventsByPersistenceId + // #EventsByPersistenceId } "demonstrate AllPersistenceIds" in { - //#AllPersistenceIds + // #AllPersistenceIds val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[String, NotUsed] = queries.persistenceIds() - //#AllPersistenceIds + // #AllPersistenceIds } "demonstrate EventsByTag" in { - //#EventsByTag + // #EventsByTag val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[EventEnvelope, NotUsed] = queries.eventsByTag(tag = "green", offset = Sequence(0L)) - //#EventsByTag + // #EventsByTag } } diff --git a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala index 6ae821e2ca..2096426bc0 100644 --- a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala @@ -26,14 +26,14 @@ object PersistenceQueryDocSpec { implicit val timeout: Timeout = Timeout(3.seconds) - //#advanced-journal-query-types + // #advanced-journal-query-types final case class RichEvent(tags: Set[String], payload: Any) // a plugin can provide: case class QueryMetadata(deterministicOrder: Boolean, infinite: Boolean) - //#advanced-journal-query-types + // #advanced-journal-query-types - //#my-read-journal + // #my-read-journal class MyReadJournalProvider(system: ExtendedActorSystem, config: Config) extends ReadJournalProvider { private val readJournal: MyScaladslReadJournal = @@ -70,7 +70,7 @@ object PersistenceQueryDocSpec { override def eventsByTag(tag: String, 
offset: Offset): Source[EventEnvelope, NotUsed] = offset match { case Sequence(offsetValue) => Source.fromGraph(new MyEventsByTagSource(tag, offsetValue, refreshInterval)) - case NoOffset => eventsByTag(tag, Sequence(0L)) //recursive + case NoOffset => eventsByTag(tag, Sequence(0L)) // recursive case _ => throw new IllegalArgumentException("MyJournal does not support " + offset.getClass.getName + " offsets") } @@ -95,9 +95,9 @@ object PersistenceQueryDocSpec { // possibility to add more plugin specific queries - //#advanced-journal-query-definition + // #advanced-journal-query-definition def byTagsWithMeta(tags: Set[String]): Source[RichEvent, QueryMetadata] = { - //#advanced-journal-query-definition + // #advanced-journal-query-definition // implement in a similar way as eventsByTag ??? } @@ -134,7 +134,7 @@ object PersistenceQueryDocSpec { } } - //#my-read-journal + // #my-read-journal case class ComplexState() { def readyToSave = false @@ -152,7 +152,7 @@ object PersistenceQueryDocSpec { def batchWriter: Subscriber[immutable.Seq[Any]] = ??? } - //#projection-into-different-store-rs + // #projection-into-different-store-rs implicit val system: ActorSystem = ActorSystem() val readJournal = @@ -167,7 +167,7 @@ object PersistenceQueryDocSpec { .map(convertToReadSideTypes) // convert to datatype .grouped(20) // batch inserts into groups of 20 .runWith(Sink.fromSubscriber(dbBatchWriter)) // write batches to read-side database - //#projection-into-different-store-rs + // #projection-into-different-store-rs } } @@ -184,7 +184,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { """) class BasicUsage { - //#basic-usage + // #basic-usage // obtain read journal by plugin id val readJournal = PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") @@ -197,19 +197,19 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { source.runForeach { event => println("Event: " + event) } - //#basic-usage + // #basic-usage - //#all-persistence-ids-live + // #all-persistence-ids-live readJournal.persistenceIds() - //#all-persistence-ids-live + // #all-persistence-ids-live - //#all-persistence-ids-snap + // #all-persistence-ids-snap readJournal.currentPersistenceIds() - //#all-persistence-ids-snap + // #all-persistence-ids-snap trait OrderCompleted - //#events-by-tag + // #events-by-tag // assuming journal is able to work with numeric offsets we can: val completedOrders: Source[EventEnvelope, NotUsed] = @@ -225,14 +225,14 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { // start another query, from the known offset val furtherOrders = readJournal.eventsByTag("order-completed", offset = Sequence(10)) - //#events-by-tag + // #events-by-tag - //#events-by-persistent-id + // #events-by-persistent-id readJournal.eventsByPersistenceId("user-us-1337", fromSequenceNr = 0L, toSequenceNr = Long.MaxValue) - //#events-by-persistent-id + // #events-by-persistent-id - //#advanced-journal-query-usage + // #advanced-journal-query-usage val query: Source[RichEvent, QueryMetadata] = readJournal.byTagsWithMeta(Set("red", "blue")) @@ -248,27 +248,27 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { } .runWith(Sink.ignore) - //#advanced-journal-query-usage + // #advanced-journal-query-usage } - //#projection-into-different-store + // #projection-into-different-store class MyResumableProjection(name: String) { def saveProgress(offset: Offset): Future[Long] = ??? def latestOffset: Future[Long] = ??? 
} - //#projection-into-different-store + // #projection-into-different-store class RunWithAsyncFunction { val readJournal = PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") - //#projection-into-different-store-simple-classes + // #projection-into-different-store-simple-classes trait ExampleStore { def save(event: Any): Future[Unit] } - //#projection-into-different-store-simple-classes + // #projection-into-different-store-simple-classes - //#projection-into-different-store-simple + // #projection-into-different-store-simple val store: ExampleStore = ??? readJournal @@ -277,7 +277,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { store.save(e) } .runWith(Sink.ignore) - //#projection-into-different-store-simple + // #projection-into-different-store-simple } } diff --git a/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala b/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala index 175bf72e42..205fc922ab 100644 --- a/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala +++ b/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala @@ -11,7 +11,7 @@ import com.typesafe.config.ConfigFactory object TestKitTypedConf { - //#testkit-typed-conf + // #testkit-typed-conf val yourConfiguration = ConfigFactory.defaultApplication() @@ -20,13 +20,13 @@ object TestKitTypedConf { val testKit = PersistenceTestKit(system) - //#testkit-typed-conf + // #testkit-typed-conf } object SnapshotTypedConf { - //#snapshot-typed-conf + // #snapshot-typed-conf val yourConfiguration = ConfigFactory.defaultApplication() @@ -37,6 +37,6 @@ object SnapshotTypedConf { val testKit = SnapshotTestKit(system) - //#snapshot-typed-conf + // #snapshot-typed-conf } diff --git a/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala b/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala index 1466213864..d60917e18b 100644 --- a/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala @@ -27,11 +27,11 @@ class PersistenceInitSpec extends ScalaTestWithActorTestKit(s""" "PersistenceInit" should { "initialize plugins" in { - //#init + // #init val timeout = 5.seconds val done: Future[Done] = PersistenceInit.initializeDefaultPlugins(system, timeout) Await.result(done, timeout) - //#init + // #init } } } diff --git a/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala b/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala index 9ebe01142e..0d80a00a9a 100644 --- a/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala +++ b/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala @@ -68,14 +68,14 @@ import akka.persistence.testkit._ class SampleEventStoragePolicy extends EventStorage.JournalPolicies.PolicyType { - //you can use internal state, it does not need to be thread safe + // you can use internal state; it does not need to be thread-safe var count = 1 override def tryProcess(persistenceId: String, processingUnit: JournalOperation): ProcessingResult = if (count < 10) { count += 1 - //check the type of operation and react with success or with reject or with failure. - //if you return ProcessingSuccess the operation will be performed, otherwise not. + // check the type of operation and react with success, rejection, or failure. 
+ // if you return ProcessingSuccess the operation will be performed; otherwise it will not be. processingUnit match { case ReadEvents(batch) if batch.nonEmpty => ProcessingSuccess case WriteEvents(batch) if batch.size > 1 => @@ -94,14 +94,14 @@ class SampleEventStoragePolicy extends EventStorage.JournalPolicies.PolicyType { //#set-snapshot-storage-policy class SampleSnapshotStoragePolicy extends SnapshotStorage.SnapshotPolicies.PolicyType { - //you can use internal state, it does not need to be thread safe + // you can use internal state; it does not need to be thread-safe var count = 1 override def tryProcess(persistenceId: String, processingUnit: SnapshotOperation): ProcessingResult = if (count < 10) { count += 1 - //check the type of operation and react with success or with reject or with failure. - //if you return ProcessingSuccess the operation will be performed, otherwise not. + // check the type of operation and react with success, rejection, or failure. + // if you return ProcessingSuccess the operation will be performed; otherwise it will not be. processingUnit match { case ReadSnapshot(_, payload) if payload.nonEmpty => ProcessingSuccess diff --git a/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala b/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala index b9a31ce160..8d879199a6 100644 --- a/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala +++ b/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala @@ -35,35 +35,35 @@ class RemoteDeploymentDocSpec extends AkkaSpec(""" override def afterTermination(): Unit = { shutdown(other) } "demonstrate programmatic deployment" in { - //#deploy + // #deploy val ref = system.actorOf(Props[SampleActor]().withDeploy(Deploy(scope = RemoteScope(address)))) - //#deploy + // #deploy ref.path.address should be(address) ref ! "test" expectMsgType[ActorRef].path.address should be(address) } def makeAddress(): Unit = { - //#make-address-artery + // #make-address-artery val one = AddressFromURIString("akka://sys@host:1234") val two = Address("akka", "sys", "host", 1234) // this gives the same - //#make-address-artery + // #make-address-artery } "demonstrate address extractor" in { - //#make-address + // #make-address val one = AddressFromURIString("akka://sys@host:1234") val two = Address("akka", "sys", "host", 1234) // this gives the same - //#make-address + // #make-address one should be(two) } "demonstrate sampleActor" in { - //#sample-actor + // #sample-actor val actor = system.actorOf(Props[SampleActor](), "sampleActor") actor ! 
"Pretty slick" - //#sample-actor + // #sample-actor } } diff --git a/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala index 572f908459..a28a35298f 100644 --- a/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala @@ -11,7 +11,7 @@ import akka.actor.ActorRef object ConsistentHashingRouterDocSpec { - //#cache-actor + // #cache-actor import akka.actor.Actor import akka.routing.ConsistentHashingRouter.ConsistentHashable @@ -32,7 +32,7 @@ object ConsistentHashingRouterDocSpec { } final case class Entry(key: String, value: String) - //#cache-actor + // #cache-actor } @@ -44,7 +44,7 @@ class ConsistentHashingRouterDocSpec extends AkkaSpec with ImplicitSender { def context = system - //#consistent-hashing-router + // #consistent-hashing-router import akka.actor.Props import akka.routing.ConsistentHashingPool import akka.routing.ConsistentHashingRouter.ConsistentHashMapping @@ -70,7 +70,7 @@ class ConsistentHashingRouterDocSpec extends AkkaSpec with ImplicitSender { cache ! Get("hi") expectMsg(None) - //#consistent-hashing-router + // #consistent-hashing-router } diff --git a/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala index a0a27a4811..9623ad4a15 100644 --- a/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala @@ -40,7 +40,7 @@ akka.actor.deployment { #//#jconfig """ - //#routing-logic + // #routing-logic import scala.collection.immutable import java.util.concurrent.ThreadLocalRandom import akka.routing.RoundRobinRoutingLogic @@ -55,7 +55,7 @@ akka.actor.deployment { SeveralRoutees(targets) } } - //#routing-logic + // #routing-logic class Storage extends Actor { def receive = { @@ -63,12 +63,12 @@ akka.actor.deployment { } } - //#unit-test-logic + // #unit-test-logic final case class TestRoutee(n: Int) extends Routee { override def send(message: Any, sender: ActorRef): Unit = () } - //#unit-test-logic + // #unit-test-logic } //#group @@ -98,7 +98,7 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl import akka.routing.SeveralRoutees "unit test routing logic" in { - //#unit-test-logic + // #unit-test-logic val logic = new RedundancyRoutingLogic(nbrCopies = 3) val routees = for (n <- 1 to 7) yield TestRoutee(n) @@ -111,26 +111,26 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl val r3 = logic.select("msg", routees) r3.asInstanceOf[SeveralRoutees].routees should be(Vector(TestRoutee(7), TestRoutee(1), TestRoutee(2))) - //#unit-test-logic + // #unit-test-logic } "demonstrate usage of custom router" in { - //#usage-1 + // #usage-1 for (n <- 1 to 10) system.actorOf(Props[Storage](), "s" + n) - val paths = for (n <- 1 to 10) yield ("/user/s" + n) + val paths = for (n <- 1 to 10) yield "/user/s" + n val redundancy1: ActorRef = system.actorOf(RedundancyGroup(paths, nbrCopies = 3).props(), name = "redundancy1") redundancy1 ! "important" - //#usage-1 + // #usage-1 for (_ <- 1 to 3) expectMsg("important") - //#usage-2 + // #usage-2 val redundancy2: ActorRef = system.actorOf(FromConfig.props(), name = "redundancy2") redundancy2 ! 
"very important" - //#usage-2 + // #usage-2 for (_ <- 1 to 5) expectMsg("very important") diff --git a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala index b9d19d6fb7..de5a7e2079 100644 --- a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala @@ -293,7 +293,7 @@ router-dispatcher {} final case class Work(payload: String) - //#router-in-actor + // #router-in-actor import akka.routing.{ ActorRefRoutee, RoundRobinRoutingLogic, Router } class Master extends Actor { @@ -316,7 +316,7 @@ router-dispatcher {} router = router.addRoutee(r) } } - //#router-in-actor + // #router-in-actor class Worker extends Actor { def receive = { @@ -324,13 +324,13 @@ router-dispatcher {} } } - //#create-worker-actors + // #create-worker-actors class Workers extends Actor { context.actorOf(Props[Worker](), name = "w1") context.actorOf(Props[Worker](), name = "w2") context.actorOf(Props[Worker](), name = "w3") // ... - //#create-worker-actors + // #create-worker-actors def receive = { case _ => @@ -339,64 +339,64 @@ router-dispatcher {} class Parent extends Actor { - //#paths + // #paths val paths = List("/user/workers/w1", "/user/workers/w2", "/user/workers/w3") - //#paths + // #paths - //#round-robin-pool-1 + // #round-robin-pool-1 val router1: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router1") - //#round-robin-pool-1 + // #round-robin-pool-1 - //#round-robin-pool-2 + // #round-robin-pool-2 val router2: ActorRef = context.actorOf(RoundRobinPool(5).props(Props[Worker]()), "router2") - //#round-robin-pool-2 + // #round-robin-pool-2 - //#round-robin-group-1 + // #round-robin-group-1 val router3: ActorRef = context.actorOf(FromConfig.props(), "router3") - //#round-robin-group-1 + // #round-robin-group-1 - //#round-robin-group-2 + // #round-robin-group-2 val router4: ActorRef = context.actorOf(RoundRobinGroup(paths).props(), "router4") - //#round-robin-group-2 + // #round-robin-group-2 - //#random-pool-1 + // #random-pool-1 val router5: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router5") - //#random-pool-1 + // #random-pool-1 - //#random-pool-2 + // #random-pool-2 val router6: ActorRef = context.actorOf(RandomPool(5).props(Props[Worker]()), "router6") - //#random-pool-2 + // #random-pool-2 - //#random-group-1 + // #random-group-1 val router7: ActorRef = context.actorOf(FromConfig.props(), "router7") - //#random-group-1 + // #random-group-1 - //#random-group-2 + // #random-group-2 val router8: ActorRef = context.actorOf(RandomGroup(paths).props(), "router8") - //#random-group-2 + // #random-group-2 - //#balancing-pool-1 + // #balancing-pool-1 val router9: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router9") - //#balancing-pool-1 + // #balancing-pool-1 - //#balancing-pool-2 + // #balancing-pool-2 val router10: ActorRef = context.actorOf(BalancingPool(5).props(Props[Worker]()), "router10") - //#balancing-pool-2 + // #balancing-pool-2 // #balancing-pool-3 val router10b: ActorRef = context.actorOf(BalancingPool(20).props(Props[Worker]()), "router10b") - //#balancing-pool-3 + // #balancing-pool-3 for (i <- 1 to 100) router10b ! i val threads10b = Thread.getAllStackTraces.keySet.asScala.filter { _.getName contains "router10b" } val threads10bNr = threads10b.size @@ -404,111 +404,111 @@ router-dispatcher {} threads10bNr == 5, s"Expected 5 threads for router10b, had $threads10bNr! 
Got: ${threads10b.map(_.getName)}") - //#smallest-mailbox-pool-1 + // #smallest-mailbox-pool-1 val router11: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router11") - //#smallest-mailbox-pool-1 + // #smallest-mailbox-pool-1 - //#smallest-mailbox-pool-2 + // #smallest-mailbox-pool-2 val router12: ActorRef = context.actorOf(SmallestMailboxPool(5).props(Props[Worker]()), "router12") - //#smallest-mailbox-pool-2 + // #smallest-mailbox-pool-2 - //#broadcast-pool-1 + // #broadcast-pool-1 val router13: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router13") - //#broadcast-pool-1 + // #broadcast-pool-1 - //#broadcast-pool-2 + // #broadcast-pool-2 val router14: ActorRef = context.actorOf(BroadcastPool(5).props(Props[Worker]()), "router14") - //#broadcast-pool-2 + // #broadcast-pool-2 - //#broadcast-group-1 + // #broadcast-group-1 val router15: ActorRef = context.actorOf(FromConfig.props(), "router15") - //#broadcast-group-1 + // #broadcast-group-1 - //#broadcast-group-2 + // #broadcast-group-2 val router16: ActorRef = context.actorOf(BroadcastGroup(paths).props(), "router16") - //#broadcast-group-2 + // #broadcast-group-2 - //#scatter-gather-pool-1 + // #scatter-gather-pool-1 val router17: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router17") - //#scatter-gather-pool-1 + // #scatter-gather-pool-1 - //#scatter-gather-pool-2 + // #scatter-gather-pool-2 val router18: ActorRef = context.actorOf(ScatterGatherFirstCompletedPool(5, within = 10.seconds).props(Props[Worker]()), "router18") - //#scatter-gather-pool-2 + // #scatter-gather-pool-2 - //#scatter-gather-group-1 + // #scatter-gather-group-1 val router19: ActorRef = context.actorOf(FromConfig.props(), "router19") - //#scatter-gather-group-1 + // #scatter-gather-group-1 - //#scatter-gather-group-2 + // #scatter-gather-group-2 val router20: ActorRef = context.actorOf(ScatterGatherFirstCompletedGroup(paths, within = 10.seconds).props(), "router20") - //#scatter-gather-group-2 + // #scatter-gather-group-2 - //#tail-chopping-pool-1 + // #tail-chopping-pool-1 val router21: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router21") - //#tail-chopping-pool-1 + // #tail-chopping-pool-1 - //#tail-chopping-pool-2 + // #tail-chopping-pool-2 val router22: ActorRef = context.actorOf(TailChoppingPool(5, within = 10.seconds, interval = 20.millis).props(Props[Worker]()), "router22") - //#tail-chopping-pool-2 + // #tail-chopping-pool-2 - //#tail-chopping-group-1 + // #tail-chopping-group-1 val router23: ActorRef = context.actorOf(FromConfig.props(), "router23") - //#tail-chopping-group-1 + // #tail-chopping-group-1 - //#tail-chopping-group-2 + // #tail-chopping-group-2 val router24: ActorRef = context.actorOf(TailChoppingGroup(paths, within = 10.seconds, interval = 20.millis).props(), "router24") - //#tail-chopping-group-2 + // #tail-chopping-group-2 - //#consistent-hashing-pool-1 + // #consistent-hashing-pool-1 val router25: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router25") - //#consistent-hashing-pool-1 + // #consistent-hashing-pool-1 - //#consistent-hashing-pool-2 + // #consistent-hashing-pool-2 val router26: ActorRef = context.actorOf(ConsistentHashingPool(5).props(Props[Worker]()), "router26") - //#consistent-hashing-pool-2 + // #consistent-hashing-pool-2 - //#consistent-hashing-group-1 + // #consistent-hashing-group-1 val router27: ActorRef = context.actorOf(FromConfig.props(), "router27") - //#consistent-hashing-group-1 + // #consistent-hashing-group-1 - 
//#consistent-hashing-group-2 + // #consistent-hashing-group-2 val router28: ActorRef = context.actorOf(ConsistentHashingGroup(paths).props(), "router28") - //#consistent-hashing-group-2 + // #consistent-hashing-group-2 - //#resize-pool-1 + // #resize-pool-1 val router29: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router29") - //#resize-pool-1 + // #resize-pool-1 - //#resize-pool-2 + // #resize-pool-2 val resizer = DefaultResizer(lowerBound = 2, upperBound = 15) val router30: ActorRef = context.actorOf(RoundRobinPool(5, Some(resizer)).props(Props[Worker]()), "router30") - //#resize-pool-2 + // #resize-pool-2 - //#optimal-size-exploring-resize-pool + // #optimal-size-exploring-resize-pool val router31: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router31") - //#optimal-size-exploring-resize-pool + // #optimal-size-exploring-resize-pool def receive = { case _ => @@ -527,89 +527,89 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender { import RouterDocSpec._ - //#create-workers + // #create-workers system.actorOf(Props[Workers](), "workers") - //#create-workers + // #create-workers - //#create-parent + // #create-parent system.actorOf(Props[Parent](), "parent") - //#create-parent + // #create-parent "demonstrate dispatcher" in { - //#dispatchers + // #dispatchers val router: ActorRef = system.actorOf( // “head” router actor will run on "router-dispatcher" dispatcher // Worker routees will run on "pool-dispatcher" dispatcher RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]()), name = "poolWithDispatcher") - //#dispatchers + // #dispatchers } "demonstrate broadcast" in { val router = system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]())) - //#broadcastDavyJonesWarning + // #broadcastDavyJonesWarning import akka.routing.Broadcast router ! Broadcast("Watch out for Davy Jones' locker") - //#broadcastDavyJonesWarning + // #broadcastDavyJonesWarning (receiveN(5, 5.seconds.dilated) should have).length(5) } "demonstrate PoisonPill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#poisonPill + // #poisonPill import akka.actor.PoisonPill router ! PoisonPill - //#poisonPill + // #poisonPill expectTerminated(router) } "demonstrate broadcast of PoisonPill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#broadcastPoisonPill + // #broadcastPoisonPill import akka.actor.PoisonPill import akka.routing.Broadcast router ! Broadcast(PoisonPill) - //#broadcastPoisonPill + // #broadcastPoisonPill expectTerminated(router) } "demonstrate Kill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#kill + // #kill import akka.actor.Kill router ! Kill - //#kill + // #kill expectTerminated(router) } "demonstrate broadcast of Kill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#broadcastKill + // #broadcastKill import akka.actor.Kill import akka.routing.Broadcast router ! 
Broadcast(Kill) - //#broadcastKill + // #broadcastKill expectTerminated(router) } "demonstrate remote deploy" in { - //#remoteRoutees + // #remoteRoutees import akka.actor.{ Address, AddressFromURIString } import akka.remote.routing.RemoteRouterConfig val addresses = Seq(Address("akka", "remotesys", "otherhost", 1234), AddressFromURIString("akka://othersys@anotherhost:1234")) val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo]())) - //#remoteRoutees + // #remoteRoutees } // only compile test def demonstrateRemoteDeployWithArtery(): Unit = { - //#remoteRoutees-artery + // #remoteRoutees-artery import akka.actor.{ Address, AddressFromURIString } import akka.remote.routing.RemoteRouterConfig val addresses = Seq(Address("akka", "remotesys", "otherhost", 1234), AddressFromURIString("akka://othersys@anotherhost:1234")) val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo]())) - //#remoteRoutees-artery + // #remoteRoutees-artery } } diff --git a/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala b/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala index 61101e70c6..c624cde69f 100644 --- a/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala @@ -4,20 +4,20 @@ package docs.serialization { - //#imports + // #imports import akka.actor._ import akka.actor.typed.scaladsl.Behaviors import akka.cluster.Cluster import akka.serialization._ - //#imports + // #imports import akka.testkit._ import com.typesafe.config.ConfigFactory import akka.actor.ExtendedActorSystem import java.nio.charset.StandardCharsets - //#my-own-serializer + // #my-own-serializer class MyOwnSerializer extends Serializer { // If you need logging here, introduce a constructor that takes an ExtendedActorSystem. @@ -36,23 +36,23 @@ package docs.serialization { // "toBinary" serializes the given object to an Array of Bytes def toBinary(obj: AnyRef): Array[Byte] = { // Put the code that serializes the object here - //#... + // #... Array[Byte]() - //#... + // #... } // "fromBinary" deserializes the given array, // using the type hint (if any, see "includeManifest" above) def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { // Put your code that deserializes here - //#... + // #... null - //#... + // #... } } - //#my-own-serializer + // #my-own-serializer - //#my-own-serializer2 + // #my-own-serializer2 class MyOwnSerializer2 extends SerializerWithStringManifest { val CustomerManifest = "customer" @@ -93,7 +93,7 @@ package docs.serialization { } } } - //#my-own-serializer2 + // #my-own-serializer2 trait MyOwnSerializable final case class Customer(name: String) extends MyOwnSerializable @@ -202,7 +202,7 @@ package docs.serialization { } "demonstrate the programmatic API" in { - //#programmatic + // #programmatic val system = ActorSystem("example") // Get the Serialization Extension @@ -218,7 +218,7 @@ package docs.serialization { // Turn it back into an object val back = serialization.deserialize(bytes, serializerId, manifest).get - //#programmatic + // #programmatic // Voilá! 
back should be(original) @@ -227,21 +227,21 @@ package docs.serialization { } def demonstrateTypedActorSystem(): Unit = { - //#programmatic-typed + // #programmatic-typed import akka.actor.typed.ActorSystem val system = ActorSystem(Behaviors.empty, "example") // Get the Serialization Extension val serialization = SerializationExtension(system) - //#programmatic-typed + // #programmatic-typed } def demonstrateSerializationOfActorRefs(): Unit = { val theActorRef: ActorRef = system.deadLetters val extendedSystem: ExtendedActorSystem = system.asInstanceOf[ExtendedActorSystem] - //#actorref-serializer + // #actorref-serializer // Serialize // (beneath toBinary) val serializedRef: String = Serialization.serializedActorPath(theActorRef) @@ -252,18 +252,18 @@ package docs.serialization { // (beneath fromBinary) val deserializedRef = extendedSystem.provider.resolveActorRef(serializedRef) // Then use the ActorRef - //#actorref-serializer + // #actorref-serializer } def demonstrateSerializationOfActorRefs2(): Unit = { val theActorRef: ActorRef = system.deadLetters - //#external-address-default + // #external-address-default val selfAddress = Cluster(system).selfAddress val serializedRef: String = theActorRef.path.toSerializationFormatWithAddress(selfAddress) - //#external-address-default + // #external-address-default } } } diff --git a/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala b/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala index 69e5ad5d2b..b24a5a7e65 100644 --- a/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala @@ -14,24 +14,24 @@ import scala.concurrent.duration._ import scala.concurrent.Await object BidiFlowDocSpec { - //#codec + // #codec trait Message case class Ping(id: Int) extends Message case class Pong(id: Int) extends Message - //#codec-impl + // #codec-impl def toBytes(msg: Message): ByteString = { - //#implementation-details-elided + // #implementation-details-elided implicit val order = ByteOrder.LITTLE_ENDIAN msg match { case Ping(id) => ByteString.newBuilder.putByte(1).putInt(id).result() case Pong(id) => ByteString.newBuilder.putByte(2).putInt(id).result() } - //#implementation-details-elided + // #implementation-details-elided } def fromBytes(bytes: ByteString): Message = { - //#implementation-details-elided + // #implementation-details-elided implicit val order = ByteOrder.LITTLE_ENDIAN val it = bytes.iterator it.getByte match { @@ -39,9 +39,9 @@ object BidiFlowDocSpec { case 2 => Pong(it.getInt) case other => throw new RuntimeException(s"parse error: expected 1|2 got $other") } - //#implementation-details-elided + // #implementation-details-elided } - //#codec-impl + // #codec-impl val codecVerbose = BidiFlow.fromGraph(GraphDSL.create() { b => // construct and add the top flow, going outbound @@ -54,9 +54,9 @@ object BidiFlowDocSpec { // this is the same as the above val codec = BidiFlow.fromFunctions(toBytes _, fromBytes _) - //#codec + // #codec - //#framing + // #framing val framing = BidiFlow.fromGraph(GraphDSL.create() { b => implicit val order = ByteOrder.LITTLE_ENDIAN @@ -78,28 +78,30 @@ object BidiFlowDocSpec { // this holds the current message length or -1 if at a boundary var needed = -1 - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (isClosed(in)) run() - else pull(in) - } - }) - setHandler(in, new InHandler { - override def onPush(): Unit = { - val bytes = grab(in) - stash = stash ++ bytes - run() - } + setHandler(out, + new OutHandler { 
+ override def onPull(): Unit = { + if (isClosed(in)) run() + else pull(in) + } + }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val bytes = grab(in) + stash = stash ++ bytes + run() + } - override def onUpstreamFinish(): Unit = { - // either we are done - if (stash.isEmpty) completeStage() - // or we still have bytes to emit - // wait with completion and let run() complete when the - // rest of the stash has been sent downstream - else if (isAvailable(out)) run() - } - }) + override def onUpstreamFinish(): Unit = { + // either we are done + if (stash.isEmpty) completeStage() + // or we still have bytes to emit + // wait with completion and let run() complete when the + // rest of the stash has been sent downstream + else if (isAvailable(out)) run() + } + }) private def run(): Unit = { if (needed == -1) { @@ -132,7 +134,7 @@ object BidiFlowDocSpec { val inbound = b.add(Flow[ByteString].via(new FrameParser)) BidiShape.fromFlows(outbound, inbound) }) - //#framing + // #framing val chopUp = BidiFlow.fromGraph(GraphDSL.create() { b => val f = Flow[ByteString].mapConcat(_.map(ByteString(_))) @@ -151,7 +153,7 @@ class BidiFlowDocSpec extends AkkaSpec { "A BidiFlow" must { "compose" in { - //#compose + // #compose /* construct protocol stack * +------------------------------------+ * | stack | @@ -170,7 +172,7 @@ class BidiFlowDocSpec extends AkkaSpec { val flow = stack.atop(stack.reversed).join(pingpong) val result = Source((0 to 9).map(Ping(_))).via(flow).limit(20).runWith(Sink.seq) Await.result(result, 1.second) should ===((0 to 9).map(Pong(_))) - //#compose + // #compose } "work when chopped up" in { diff --git a/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala b/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala index 0a3411460c..e225355607 100644 --- a/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala @@ -19,15 +19,15 @@ class CompositionDocSpec extends AkkaSpec { implicit val ec: ExecutionContext = system.dispatcher "nonnested flow" in { - //#non-nested-flow + // #non-nested-flow Source.single(0).map(_ + 1).filter(_ != 0).map(_ - 2).to(Sink.fold(0)(_ + _)) // ... where is the nesting? 
- //#non-nested-flow + // #non-nested-flow } "nested flow" in { - //#nested-flow + // #nested-flow val nestedSource = Source .single(0) // An atomic source @@ -47,7 +47,7 @@ class CompositionDocSpec extends AkkaSpec { // Create a RunnableGraph val runnableGraph = nestedSource.to(nestedSink) - //#nested-flow + // #nested-flow } "reusing components" in { @@ -68,13 +68,13 @@ class CompositionDocSpec extends AkkaSpec { .to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow .named("nestedSink") // wrap it up - //#reuse + // #reuse // Create a RunnableGraph from our components val runnableGraph = nestedSource.to(nestedSink) // Usage is uniform, no matter if modules are composite or atomic val runnableGraph2 = Source.single(0).to(Sink.fold(0)(_ + _)) - //#reuse + // #reuse } "complex graph" in { @@ -136,9 +136,9 @@ class CompositionDocSpec extends AkkaSpec { //#partial-graph // format: ON - //#partial-use + // #partial-use Source.single(0).via(partial).to(Sink.ignore) - //#partial-use + // #partial-use // format: OFF //#partial-flow-dsl @@ -169,18 +169,18 @@ class CompositionDocSpec extends AkkaSpec { } "closed graph" in { - //#embed-closed + // #embed-closed val closed1 = Source.single(0).to(Sink.foreach(println)) val closed2 = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => val embeddedClosed: ClosedShape = builder.add(closed1) // … embeddedClosed }) - //#embed-closed + // #embed-closed } "materialized values" in { - //#mat-combine-1 + // #mat-combine-1 // Materializes to Promise[Option[Int]] (red) val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int] @@ -190,9 +190,9 @@ class CompositionDocSpec extends AkkaSpec { // Materializes to Promise[Int] (red) val nestedSource: Source[Int, Promise[Option[Int]]] = source.viaMat(flow1)(Keep.left).named("nestedSource") - //#mat-combine-1 + // #mat-combine-1 - //#mat-combine-2 + // #mat-combine-2 // Materializes to NotUsed (orange) val flow2: Flow[Int, ByteString, NotUsed] = Flow[Int].map { i => ByteString(i.toString) @@ -205,18 +205,18 @@ class CompositionDocSpec extends AkkaSpec { // Materializes to Future[OutgoingConnection] (yellow) val nestedFlow: Flow[Int, ByteString, Future[OutgoingConnection]] = flow2.viaMat(flow3)(Keep.right).named("nestedFlow") - //#mat-combine-2 + // #mat-combine-2 - //#mat-combine-3 + // #mat-combine-3 // Materializes to Future[String] (green) val sink: Sink[ByteString, Future[String]] = Sink.fold("")(_ + _.utf8String) // Materializes to (Future[OutgoingConnection], Future[String]) (blue) val nestedSink: Sink[Int, (Future[OutgoingConnection], Future[String])] = nestedFlow.toMat(sink)(Keep.both) - //#mat-combine-3 + // #mat-combine-3 - //#mat-combine-4 + // #mat-combine-4 case class MyClass(private val p: Promise[Option[Int]], conn: OutgoingConnection) { def close() = p.trySuccess(None) } @@ -230,11 +230,11 @@ class CompositionDocSpec extends AkkaSpec { // Materializes to Future[MyClass] (purple) val runnableGraph: RunnableGraph[Future[MyClass]] = nestedSource.toMat(nestedSink)(f) - //#mat-combine-4 + // #mat-combine-4 } "attributes" in { - //#attributes-inheritance + // #attributes-inheritance import Attributes._ val nestedSource = Source.single(0).map(_ + 1).named("nestedSource") // Wrap, no inputBuffer set @@ -249,6 +249,6 @@ class CompositionDocSpec extends AkkaSpec { nestedFlow .to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow .withAttributes(name("nestedSink") and inputBuffer(3, 3)) // override - //#attributes-inheritance + // #attributes-inheritance } } diff --git 
a/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala index 3b7b2f7759..b233aa329f 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala @@ -23,18 +23,18 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { implicit val ec: ExecutionContext = system.dispatcher "source is immutable" in { - //#source-immutable + // #source-immutable val source = Source(1 to 10) source.map(_ => 0) // has no effect on source, since it's immutable source.runWith(Sink.fold(0)(_ + _)) // 55 val zeroes = source.map(_ => 0) // returns new Source[Int], with `map()` appended zeroes.runWith(Sink.fold(0)(_ + _)) // 0 - //#source-immutable + // #source-immutable } "materialization in steps" in { - //#materialization-in-steps + // #materialization-in-steps val source = Source(1 to 10) val sink = Sink.fold[Int, Int](0)(_ + _) @@ -44,21 +44,21 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { // materialize the flow and get the value of the sink val sum: Future[Int] = runnable.run() - //#materialization-in-steps + // #materialization-in-steps } "materialization runWith" in { - //#materialization-runWith + // #materialization-runWith val source = Source(1 to 10) val sink = Sink.fold[Int, Int](0)(_ + _) // materialize the flow, getting the Sink's materialized value val sum: Future[Int] = source.runWith(sink) - //#materialization-runWith + // #materialization-runWith } "materialization is unique" in { - //#stream-reuse + // #stream-reuse // connect the Source to the Sink, obtaining a RunnableGraph val sink = Sink.fold[Int, Int](0)(_ + _) val runnable: RunnableGraph[Future[Int]] = @@ -69,7 +69,7 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { val sum2: Future[Int] = runnable.run() // sum1 and sum2 are different Futures! 
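// Spelled out as a runnable program, the behavior the stream-reuse snippet
// above documents: a RunnableGraph is an immutable blueprint, and each run()
// is an independent materialization with its own materialized value. The
// object and system names here are illustrative, not from the patch.
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Keep, RunnableGraph, Sink, Source }
import scala.concurrent.Future

object MaterializationSketch extends App {
  implicit val system: ActorSystem = ActorSystem("materialization-sketch")

  val runnable: RunnableGraph[Future[Int]] =
    Source(1 to 10).toMat(Sink.fold[Int, Int](0)(_ + _))(Keep.right)

  val sum1: Future[Int] = runnable.run() // one materialization
  val sum2: Future[Int] = runnable.run() // a second, independent one
}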
- //#stream-reuse + // #stream-reuse } "compound source cannot be used as key" in { @@ -91,7 +91,7 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { } "creating sources, sinks" in { - //#source-sink + // #source-sink // Create a source from an Iterable Source(List(1, 2, 3)) @@ -117,11 +117,11 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { // A Sink that executes a side-effecting call for every element of the stream Sink.foreach[String](println(_)) - //#source-sink + // #source-sink } "various ways of connecting source, sink, flow" in { - //#flow-connecting + // #flow-connecting // Explicitly creating and wiring up a Source, Sink and Flow Source(1 to 6).via(Flow[Int].map(_ * 2)).to(Sink.foreach(println(_))) @@ -138,7 +138,7 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { Flow[Int].alsoTo(Sink.foreach(println(_))).to(Sink.ignore) Source(1 to 6).to(otherSink) - //#flow-connecting + // #flow-connecting } "various ways of transforming materialized values" in { @@ -152,7 +152,7 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { FlowShape(zip.in1, zip.out) }) - //#flow-mat-combine + // #flow-mat-combine // A source that can be signalled explicitly from the outside val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int] @@ -213,17 +213,17 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { ClosedShape }) - //#flow-mat-combine + // #flow-mat-combine } "defining asynchronous boundaries" in { - //#flow-async + // #flow-async Source(List(1, 2, 3)).map(_ + 1).async.map(_ * 2).to(Sink.ignore) - //#flow-async + // #flow-async } "source pre-materialization" in { - //#source-prematerialization + // #source-prematerialization val completeWithDone: PartialFunction[Any, CompletionStrategy] = { case Done => CompletionStrategy.immediately } val matValuePoweredSource = Source.actorRef[String]( @@ -238,13 +238,13 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { // pass source around for materialization source.runWith(Sink.foreach(println)) - //#source-prematerialization + // #source-prematerialization } } object FlowDocSpec { - //#materializer-from-actor-context + // #materializer-from-actor-context final class RunWithMyself extends Actor { implicit val mat: Materializer = Materializer(context) @@ -258,9 +258,9 @@ object FlowDocSpec { context.stop(self) // will also terminate the stream } } - //#materializer-from-actor-context + // #materializer-from-actor-context - //#materializer-from-system-in-actor + // #materializer-from-system-in-actor final class RunForever(implicit val mat: Materializer) extends Actor { Source.maybe.runWith(Sink.onComplete { @@ -273,6 +273,6 @@ object FlowDocSpec { context.stop(self) // will NOT terminate the stream (it's bound to the system!) 
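// A sketch of the lifecycle contrast between the two actors above:
// Materializer(context) ties every stream started with it to the actor's
// lifecycle, whereas a system-bound materializer keeps streams running after
// the actor stops. Class and system names are illustrative.
import akka.actor.{ Actor, ActorSystem, Props }
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }

final class ScopedStreams extends Actor {
  // streams materialized with this materializer stop when the actor stops
  implicit val mat: Materializer = Materializer(context)
  Source.repeat("tick").runWith(Sink.ignore)
  def receive = { case "stop" => context.stop(self) }
}

object ScopedStreamsSketch extends App {
  val system = ActorSystem("scoped-streams")
  system.actorOf(Props[ScopedStreams](), "scoped") ! "stop" // stream stops too
}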
} } - //#materializer-from-system-in-actor + // #materializer-from-system-in-actor } diff --git a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala index 247ee0ce92..8da9d17323 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala @@ -14,12 +14,12 @@ import scala.concurrent.duration._ class FlowErrorDocSpec extends AkkaSpec { "demonstrate fail stream" in { - //#stop + // #stop val source = Source(0 to 5).map(100 / _) val result = source.runWith(Sink.fold(0)(_ + _)) // division by zero will fail the stream and the // result here will be a Future completed with Failure(ArithmeticException) - //#stop + // #stop intercept[ArithmeticException] { Await.result(result, 3.seconds) @@ -27,7 +27,7 @@ class FlowErrorDocSpec extends AkkaSpec { } "demonstrate resume stream" in { - //#resume + // #resume val decider: Supervision.Decider = { case _: ArithmeticException => Supervision.Resume case _ => Supervision.Stop @@ -41,13 +41,13 @@ class FlowErrorDocSpec extends AkkaSpec { val result = withCustomSupervision.run() // the element causing division by zero will be dropped // result here will be a Future completed with Success(228) - //#resume + // #resume Await.result(result, 3.seconds) should be(228) } "demonstrate resume section" in { - //#resume-section + // #resume-section val decider: Supervision.Decider = { case _: ArithmeticException => Supervision.Resume case _ => Supervision.Stop @@ -61,13 +61,13 @@ class FlowErrorDocSpec extends AkkaSpec { val result = source.runWith(Sink.fold(0)(_ + _)) // the elements causing division by zero will be dropped // result here will be a Future completed with Success(150) - //#resume-section + // #resume-section Await.result(result, 3.seconds) should be(150) } "demonstrate restart section" in { - //#restart-section + // #restart-section val decider: Supervision.Decider = { case _: IllegalArgumentException => Supervision.Restart case _ => Supervision.Stop @@ -83,24 +83,23 @@ class FlowErrorDocSpec extends AkkaSpec { // the negative element cause the scan stage to be restarted, // i.e. start from 0 again // result here will be a Future completed with Success(Vector(0, 1, 4, 0, 5, 12)) - //#restart-section + // #restart-section Await.result(result, 3.seconds) should be(Vector(0, 1, 4, 0, 5, 12)) } "demonstrate recover" in { - //#recover + // #recover Source(0 to 6) - .map( - n => - // assuming `4` and `5` are unexpected values that could throw exception - if (List(4, 5).contains(n)) throw new RuntimeException(s"Boom! Bad value found: $n") - else n.toString) + .map(n => + // assuming `4` and `5` are unexpected values that could throw exception + if (List(4, 5).contains(n)) throw new RuntimeException(s"Boom! Bad value found: $n") + else n.toString) .recover { case e: RuntimeException => e.getMessage } .runForeach(println) - //#recover + // #recover /* Output: @@ -111,22 +110,23 @@ Output: 3 // last element before failure Boom! 
Bad value found: 4 // first element on failure //#recover-output - */ + */ } "demonstrate recoverWithRetries" in { - //#recoverWithRetries + // #recoverWithRetries val planB = Source(List("five", "six", "seven", "eight")) Source(0 to 10) .map(n => if (n < 5) n.toString else throw new RuntimeException("Boom!")) - .recoverWithRetries(attempts = 1, { - case _: RuntimeException => planB - }) + .recoverWithRetries(attempts = 1, + { + case _: RuntimeException => planB + }) .runForeach(println) - //#recoverWithRetries + // #recoverWithRetries /* Output: @@ -141,7 +141,7 @@ six seven eight //#recoverWithRetries-output - */ + */ } } diff --git a/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala index 6f6c50e3d7..97c1545812 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala @@ -34,16 +34,16 @@ class FlowParallelismDocSpec extends AkkaSpec { //format: ON "Demonstrate pipelining" in { - //#pipelining + // #pipelining // With the two frying pans we can fully cook pancakes val pancakeChef: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow[ScoopOfBatter].via(fryingPan1.async).via(fryingPan2.async) - //#pipelining + // #pipelining } "Demonstrate parallel processing" in { - //#parallelism + // #parallelism val fryingPan: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow[ScoopOfBatter].map { batter => Pancake() @@ -64,11 +64,11 @@ class FlowParallelismDocSpec extends AkkaSpec { FlowShape(dispatchBatter.in, mergePancakes.out) }) - //#parallelism + // #parallelism } "Demonstrate parallelized pipelines" in { - //#parallel-pipeline + // #parallel-pipeline val pancakeChef: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit builder => val dispatchBatter = builder.add(Balance[ScoopOfBatter](2)) @@ -81,11 +81,11 @@ class FlowParallelismDocSpec extends AkkaSpec { FlowShape(dispatchBatter.in, mergePancakes.out) }) - //#parallel-pipeline + // #parallel-pipeline } "Demonstrate pipelined parallel processing" in { - //#pipelined-parallel + // #pipelined-parallel val pancakeChefs1: Flow[ScoopOfBatter, HalfCookedPancake, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit builder => val dispatchBatter = builder.add(Balance[ScoopOfBatter](2)) @@ -113,7 +113,7 @@ class FlowParallelismDocSpec extends AkkaSpec { }) val kitchen: Flow[ScoopOfBatter, Pancake, NotUsed] = pancakeChefs1.via(pancakeChefs2) - //#pipelined-parallel + // #pipelined-parallel } diff --git a/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala index e21f75aef9..0281e6e02f 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala @@ -13,7 +13,7 @@ import docs.CompileOnlySpec class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { "offer a source ref" in compileOnlySpec { - //#offer-source + // #offer-source import akka.stream.SourceRef import akka.pattern.pipe @@ -39,9 +39,9 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { def streamLogs(streamId: Long): Source[String, NotUsed] = ??? } - //#offer-source + // #offer-source - //#offer-source-use + // #offer-source-use val sourceActor = system.actorOf(Props[DataSource](), "dataSource") sourceActor ! 
RequestLogs(1337) @@ -52,11 +52,11 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { // alternatively explicitly obtain Source from SourceRef: // offer.sourceRef.source.runWith(Sink.foreach(println)) - //#offer-source-use + // #offer-source-use } "offer a sink ref" in compileOnlySpec { - //#offer-sink + // #offer-sink import akka.stream.SinkRef case class PrepareUpload(id: String) @@ -82,11 +82,11 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { def logsSinkFor(nodeId: String): Sink[String, NotUsed] = ??? } - //#offer-sink + // #offer-sink def localMetrics(): Source[String, NotUsed] = Source.single("") - //#offer-sink-use + // #offer-sink-use val receiver = system.actorOf(Props[DataReceiver](), "receiver") receiver ! PrepareUpload("system-42-tmp") @@ -94,11 +94,11 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { // stream local metrics to Sink's origin: localMetrics().runWith(ready.sinkRef) - //#offer-sink-use + // #offer-sink-use } "show how to configure timeouts with attrs" in compileOnlySpec { - //#attr-sub-timeout + // #attr-sub-timeout // configure the timeout for source import scala.concurrent.duration._ import akka.stream.StreamRefAttributes @@ -113,7 +113,7 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { .sinkRef() .addAttributes(StreamRefAttributes.subscriptionTimeout(5.seconds)) .runWith(Sink.ignore) // not very interesting Sink, just an example - //#attr-sub-timeout + // #attr-sub-timeout } } diff --git a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala index 04a54f19a4..892183bcfd 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala @@ -38,14 +38,14 @@ class GraphDSLDocSpec extends AkkaSpec { //#simple-graph-dsl //format: ON - //#simple-graph-run + // #simple-graph-run g.run() - //#simple-graph-run + // #simple-graph-run } "flow connection errors" in { intercept[IllegalStateException] { - //#simple-graph + // #simple-graph RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ val source1 = Source(1 to 10) @@ -58,18 +58,18 @@ class GraphDSLDocSpec extends AkkaSpec { // unconnected zip.out (!) 
=> "must have at least 1 outgoing edge" ClosedShape }) - //#simple-graph + // #simple-graph }.getMessage should include("ZipWith2.out") } "reusing a flow in a graph" in { - //#graph-dsl-reusing-a-flow + // #graph-dsl-reusing-a-flow val topHeadSink = Sink.head[Int] val bottomHeadSink = Sink.head[Int] val sharedDoubler = Flow[Int].map(_ * 2) - //#graph-dsl-reusing-a-flow + // #graph-dsl-reusing-a-flow // format: OFF val g = @@ -93,7 +93,7 @@ class GraphDSLDocSpec extends AkkaSpec { "building a reusable component" in { - //#graph-dsl-components-shape + // #graph-dsl-components-shape // A shape represents the input and output ports of a reusable // processing module case class PriorityWorkerPoolShape[In, Out](jobsIn: Inlet[In], priorityJobsIn: Inlet[In], resultsOut: Outlet[Out]) @@ -112,9 +112,9 @@ class GraphDSLDocSpec extends AkkaSpec { PriorityWorkerPoolShape(jobsIn.carbonCopy(), priorityJobsIn.carbonCopy(), resultsOut.carbonCopy()) } - //#graph-dsl-components-shape + // #graph-dsl-components-shape - //#graph-dsl-components-create + // #graph-dsl-components-create object PriorityWorkerPool { def apply[In, Out]( worker: Flow[In, Out, Any], @@ -147,11 +147,11 @@ class GraphDSLDocSpec extends AkkaSpec { } } - //#graph-dsl-components-create + // #graph-dsl-components-create def println(s: Any): Unit = () - //#graph-dsl-components-use + // #graph-dsl-components-use val worker1 = Flow[String].map("step 1 " + _) val worker2 = Flow[String].map("step 2 " + _) @@ -162,19 +162,19 @@ class GraphDSLDocSpec extends AkkaSpec { val priorityPool1 = b.add(PriorityWorkerPool(worker1, 4)) val priorityPool2 = b.add(PriorityWorkerPool(worker2, 2)) - Source(1 to 100).map("job: " + _) ~> priorityPool1.jobsIn + Source(1 to 100).map("job: " + _) ~> priorityPool1.jobsIn Source(1 to 100).map("priority job: " + _) ~> priorityPool1.priorityJobsIn - priorityPool1.resultsOut ~> priorityPool2.jobsIn + priorityPool1.resultsOut ~> priorityPool2.jobsIn Source(1 to 100).map("one-step, priority " + _) ~> priorityPool2.priorityJobsIn priorityPool2.resultsOut ~> Sink.foreach(println) ClosedShape }) .run() - //#graph-dsl-components-use + // #graph-dsl-components-use - //#graph-dsl-components-shape2 + // #graph-dsl-components-shape2 import FanInShape.{ Init, Name } class PriorityWorkerPoolShape2[In, Out](_init: Init[Out] = Name("PriorityWorkerPool")) @@ -185,22 +185,22 @@ class GraphDSLDocSpec extends AkkaSpec { val priorityJobsIn = newInlet[In]("priorityJobsIn") // Outlet[Out] with name "out" is automatically created } - //#graph-dsl-components-shape2 + // #graph-dsl-components-shape2 } "access to materialized value" in { - //#graph-dsl-matvalue + // #graph-dsl-matvalue import GraphDSL.Implicits._ val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.createGraph(Sink.fold[Int, Int](0)(_ + _)) { implicit builder => fold => FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet) }) - //#graph-dsl-matvalue + // #graph-dsl-matvalue Await.result(Source(1 to 10).via(foldFlow).runWith(Sink.head), 3.seconds) should ===(55) - //#graph-dsl-matvalue-cycle + // #graph-dsl-matvalue-cycle import GraphDSL.Implicits._ // This cannot produce any value: val cyclicFold: Source[Int, Future[Int]] = @@ -213,7 +213,7 @@ class GraphDSLDocSpec extends AkkaSpec { builder.materializedValue.mapAsync(4)(identity) ~> fold SourceShape(builder.materializedValue.mapAsync(4)(identity).outlet) }) - //#graph-dsl-matvalue-cycle + // #graph-dsl-matvalue-cycle } } diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala 
b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala index 5702d94ae8..0e6bc509ab 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala @@ -19,7 +19,7 @@ import scala.collection.immutable.Iterable class GraphStageDocSpec extends AkkaSpec { "Demonstrate creation of GraphStage boilerplate" in { - //#boilerplate-example + // #boilerplate-example import akka.stream.SourceShape import akka.stream.stage.GraphStage @@ -32,12 +32,12 @@ class GraphStageDocSpec extends AkkaSpec { // This is where the actual (possibly stateful) logic will live override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = ??? } - //#boilerplate-example + // #boilerplate-example } "Demonstrate creation of GraphStage Source" in { - //#custom-source-example + // #custom-source-example import akka.stream.Attributes import akka.stream.Outlet import akka.stream.SourceShape @@ -58,17 +58,18 @@ class GraphStageDocSpec extends AkkaSpec { // registered handlers. private var counter = 1 - setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, counter) - counter += 1 - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + push(out, counter) + counter += 1 + } + }) } } - //#custom-source-example + // #custom-source-example - //#simple-source-usage + // #simple-source-usage // A GraphStage is a proper Graph, just like what GraphDSL.create would return val sourceGraph: Graph[SourceShape[Int], NotUsed] = new NumbersSource @@ -80,14 +81,14 @@ class GraphStageDocSpec extends AkkaSpec { // The source is reusable. This returns 5050 val result2: Future[Int] = mySource.take(100).runFold(0)(_ + _) - //#simple-source-usage + // #simple-source-usage Await.result(result1, 3.seconds) should ===(55) Await.result(result2, 3.seconds) should ===(5050) } "Demonstrate creation of GraphStage Sink" in { - //#custom-sink-example + // #custom-sink-example import akka.stream.Attributes import akka.stream.Inlet import akka.stream.SinkShape @@ -105,20 +106,21 @@ class GraphStageDocSpec extends AkkaSpec { // This requests one element at the Sink startup. 
override def preStart(): Unit = pull(in) - setHandler(in, new InHandler { - override def onPush(): Unit = { - println(grab(in)) - pull(in) - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + println(grab(in)) + pull(in) + } + }) } } - //#custom-sink-example + // #custom-sink-example Source(List(0, 1, 2)).runWith(Sink.fromGraph(new StdoutSink)) } - //#one-to-one + // #one-to-one class Map[A, B](f: A => B) extends GraphStage[FlowShape[A, B]] { val in = Inlet[A]("Map.in") @@ -128,19 +130,21 @@ class GraphStageDocSpec extends AkkaSpec { override def createLogic(attr: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - push(out, f(grab(in))) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + push(out, f(grab(in))) + } + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } } - //#one-to-one + // #one-to-one "Demonstrate a one to one element GraphStage" in { // tests: @@ -152,7 +156,7 @@ class GraphStageDocSpec extends AkkaSpec { Await.result(result, 3.seconds) should ===(Seq(3, 3, 5)) } - //#many-to-one + // #many-to-one class Filter[A](p: A => Boolean) extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("Filter.in") @@ -162,21 +166,23 @@ class GraphStageDocSpec extends AkkaSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - if (p(elem)) push(out, elem) - else pull(in) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + if (p(elem)) push(out, elem) + else pull(in) + } + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } } - //#many-to-one + // #many-to-one "Demonstrate a many to one element GraphStage" in { @@ -189,7 +195,7 @@ class GraphStageDocSpec extends AkkaSpec { Await.result(result, 3.seconds) should ===(Seq(2, 4, 6)) } - //#one-to-many + // #one-to-many class Duplicator[A] extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("Duplicator.in") @@ -203,32 +209,34 @@ class GraphStageDocSpec extends AkkaSpec { // MUST be inside the GraphStageLogic var lastElem: Option[A] = None - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - lastElem = Some(elem) - push(out, elem) - } - - override def onUpstreamFinish(): Unit = { - if (lastElem.isDefined) emit(out, lastElem.get) - complete(out) - } - - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (lastElem.isDefined) { - push(out, lastElem.get) - lastElem = None - } else { - pull(in) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + lastElem = Some(elem) + push(out, elem) } - } - }) + + override def onUpstreamFinish(): Unit = { + if (lastElem.isDefined) emit(out, lastElem.get) + complete(out) + } + + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + if (lastElem.isDefined) { + push(out, lastElem.get) + lastElem = None + } else { + pull(in) + } + } + }) } } - //#one-to-many + // #one-to-many "Demonstrate a one to many element GraphStage" in { // tests: @@ -241,7 +249,7 @@ class GraphStageDocSpec extends AkkaSpec { } 
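// The hand-written stages reindented above plug into a stream like any
// built-in operator; this mirrors the graph-operator-chain snippet further
// below, assuming the spec's Map/Filter/Duplicator GraphStage classes (not
// the collection types of the same names) are in scope.
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source

object CustomStageChainSketch extends App {
  implicit val system: ActorSystem = ActorSystem("stage-chain-sketch")

  // keep evens, duplicate each, halve: 1..5 -> 2,4 -> 2,2,4,4 -> 1,1,2,2
  Source(1 to 5)
    .via(new Filter[Int](_ % 2 == 0))
    .via(new Duplicator[Int])
    .via(new Map[Int, Int](_ / 2))
    .runForeach(println)
}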
"Demonstrate a simpler one to many stage" in { - //#simpler-one-to-many + // #simpler-one-to-many class Duplicator[A] extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("Duplicator.in") @@ -252,22 +260,24 @@ class GraphStageDocSpec extends AkkaSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - // this will temporarily suspend this handler until the two elems - // are emitted and then reinstates it - emitMultiple(out, Iterable(elem, elem)) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + // this will temporarily suspend this handler until the two elems + // are emitted and then reinstates it + emitMultiple(out, Iterable(elem, elem)) + } + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } } - //#simpler-one-to-many + // #simpler-one-to-many // tests: val duplicator = Flow.fromGraph(new Duplicator[Int]) @@ -282,18 +292,18 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate chaining of graph stages" in { val sink = Sink.fold[List[Int], Int](List.empty[Int])((acc, n) => acc :+ n) - //#graph-operator-chain + // #graph-operator-chain val resultFuture = Source(1 to 5).via(new Filter(_ % 2 == 0)).via(new Duplicator()).via(new Map(_ / 2)).runWith(sink) - //#graph-operator-chain + // #graph-operator-chain Await.result(resultFuture, 3.seconds) should ===(List(1, 1, 2, 2)) } "Demonstrate an asynchronous side channel" in { import system.dispatcher - //#async-side-channel + // #async-side-channel // will close upstream in all materializations of the graph stage instance // when the future completes class KillSwitch[A](switch: Future[Unit]) extends GraphStage[FlowShape[A, A]] { @@ -307,21 +317,23 @@ class GraphStageDocSpec extends AkkaSpec { new GraphStageLogic(shape) { override def preStart(): Unit = { - val callback = getAsyncCallback[Unit] { (_) => + val callback = getAsyncCallback[Unit] { _ => completeStage() } switch.foreach(callback.invoke) } - setHandler(in, new InHandler { - override def onPush(): Unit = { push(out, grab(in)) } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { pull(in) } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { push(out, grab(in)) } + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { pull(in) } + }) } } - //#async-side-channel + // #async-side-channel // tests: @@ -354,7 +366,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate a graph stage with a timer" in { - //#timed + // #timed // each time an event is pushed through it will trigger a period of silence class TimedGate[A](silencePeriod: FiniteDuration) extends GraphStage[FlowShape[A, A]] { @@ -368,27 +380,29 @@ class GraphStageDocSpec extends AkkaSpec { var open = false - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - if (open) pull(in) - else { - push(out, elem) - open = true - scheduleOnce(None, silencePeriod) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + if (open) pull(in) + else { + push(out, elem) + open = true + scheduleOnce(None, silencePeriod) + } } - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { pull(in) } - }) + }) + setHandler(out, + new OutHandler { + override def 
onPull(): Unit = { pull(in) } + }) override protected def onTimer(timerKey: Any): Unit = { open = false } } } - //#timed + // #timed // tests: val result = @@ -402,7 +416,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate a custom materialized value" in { - //#materialized + // #materialized class FirstValue[A] extends GraphStageWithMaterializedValue[FlowShape[A, A], Future[A]] { val in = Inlet[A]("FirstValue.in") @@ -414,33 +428,36 @@ class GraphStageDocSpec extends AkkaSpec { val promise = Promise[A]() val logic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - promise.success(elem) - push(out, elem) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + promise.success(elem) + push(out, elem) - // replace handler with one that only forwards elements - setHandler(in, new InHandler { - override def onPush(): Unit = { - push(out, grab(in)) - } - }) - } - }) + // replace handler with one that only forwards elements + setHandler(in, + new InHandler { + override def onPush(): Unit = { + push(out, grab(in)) + } + }) + } + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } (logic, promise.future) } } - //#materialized + // #materialized // tests: val flow = Source(Vector(1, 2, 3)).viaMat(new FirstValue)(Keep.right).to(Sink.ignore) @@ -453,7 +470,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate a detached graph stage" in { - //#detached + // #detached class TwoBuffer[A] extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("TwoBuffer.in") @@ -517,7 +534,7 @@ class GraphStageDocSpec extends AkkaSpec { } } - //#detached + // #detached // tests: val result1 = Source(Vector(1, 2, 3)).via(new TwoBuffer).runFold(Vector.empty[Int])((acc, n) => acc :+ n) @@ -542,7 +559,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate stream extension" when { "targeting a Source" in { - //#extending-source + // #extending-source implicit class SourceDuplicator[Out, Mat](s: Source[Out, Mat]) { def duplicateElements: Source[Out, Mat] = s.via(new Duplicator) } @@ -550,11 +567,11 @@ class GraphStageDocSpec extends AkkaSpec { val s = Source(1 to 3).duplicateElements s.runWith(Sink.seq).futureValue should ===(Seq(1, 1, 2, 2, 3, 3)) - //#extending-source + // #extending-source } "targeting a Flow" in { - //#extending-flow + // #extending-flow implicit class FlowDuplicator[In, Out, Mat](s: Flow[In, Out, Mat]) { def duplicateElements: Flow[In, Out, Mat] = s.via(new Duplicator) } @@ -562,7 +579,7 @@ class GraphStageDocSpec extends AkkaSpec { val f = Flow[Int].duplicateElements Source(1 to 3).via(f).runWith(Sink.seq).futureValue should ===(Seq(1, 1, 2, 2, 3, 3)) - //#extending-flow + // #extending-flow } } diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala index 4d5a5d8661..e7f2085692 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala @@ -15,7 +15,7 @@ class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { implicit val ec: ExecutionContext = system.dispatcher - //#operator-with-logging + // #operator-with-logging import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging } final class RandomLettersSource extends 
GraphStage[SourceShape[String]] { @@ -24,22 +24,23 @@ class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with StageLogging { - setHandler(out, new OutHandler { - override def onPull(): Unit = { - val c = nextChar() // ASCII lower case letters + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + val c = nextChar() // ASCII lower case letters - // `log` is obtained from materializer automatically (via StageLogging) - log.debug("Randomly generated: [{}]", c) + // `log` is obtained from materializer automatically (via StageLogging) + log.debug("Randomly generated: [{}]", c) - push(out, c.toString) - } - }) + push(out, c.toString) + } + }) } def nextChar(): Char = ThreadLocalRandom.current().nextInt('a', 'z'.toInt + 1).toChar } - //#operator-with-logging + // #operator-with-logging "demonstrate logging in custom graphstage" in { val n = 10 diff --git a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala index af91920a84..ec7055b2ee 100644 --- a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala @@ -20,7 +20,7 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { "demonstrate creating a dynamic merge" in { def println(s: String) = testActor ! s - //#merge-hub + // #merge-hub // A simple consumer that will print to the console for now val consumer = Sink.foreach(println) @@ -38,13 +38,13 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Feeding two independent sources into the hub. Source.single("Hello!").runWith(toConsumer) Source.single("Hub!").runWith(toConsumer) - //#merge-hub + // #merge-hub expectMsgAllOf("Hello!", "Hub!") } "demonstrate creating a dynamic broadcast" in compileOnlySpec { - //#broadcast-hub + // #broadcast-hub // A simple producer that publishes a new "message" every second val producer = Source.tick(1.second, 1.second, "New message") @@ -62,26 +62,26 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Print out messages from the producer in two independent consumers fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.runForeach(msg => println("consumer2: " + msg)) - //#broadcast-hub + // #broadcast-hub } "demonstrate combination" in { def println(s: String) = testActor ! s - //#pub-sub-1 + // #pub-sub-1 // Obtain a Sink and Source which will publish and receive from the "bus" respectively. val (sink, source) = MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run() - //#pub-sub-1 + // #pub-sub-1 - //#pub-sub-2 + // #pub-sub-2 // Ensure that the Broadcast output is dropped if there are no listening parties. // If this dropping Sink is not attached, then the broadcast hub will not drop any // elements itself when there are no subscribers, backpressuring the producer instead. source.runWith(Sink.ignore) - //#pub-sub-2 + // #pub-sub-2 - //#pub-sub-3 + // #pub-sub-3 // We create now a Flow that represents a publish-subscribe channel using the above // started stream as its "topic". We add two more features, external cancellation of // the registration and automatic cleanup for very slow subscribers. 
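// A self-contained rendering of the publish-subscribe channel that the
// pub-sub-1 snippet above materializes: MergeHub yields a Sink any number of
// producers can attach to, BroadcastHub a Source for any number of consumers.
// Object and system names are illustrative.
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ BroadcastHub, Keep, MergeHub, Source }

object PubSubChannelSketch extends App {
  implicit val system: ActorSystem = ActorSystem("hub-sketch")

  // one materialization produces both ends of the "bus"
  val (busSink, busSource) =
    MergeHub.source[String](perProducerBufferSize = 16)
      .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
      .run()

  busSource.runForeach(msg => println(s"got: $msg")) // a dynamic subscriber
  Source.single("hello bus").runWith(busSink)        // a dynamic publisher
}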
@@ -90,19 +90,19 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { .fromSinkAndSource(sink, source) .joinMat(KillSwitches.singleBidi[String, String])(Keep.right) .backpressureTimeout(3.seconds) - //#pub-sub-3 + // #pub-sub-3 - //#pub-sub-4 + // #pub-sub-4 val switch: UniqueKillSwitch = Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(println)).run() // Shut down externally switch.shutdown() - //#pub-sub-4 + // #pub-sub-4 } "demonstrate creating a dynamic partition hub" in compileOnlySpec { - //#partition-hub + // #partition-hub // A simple producer that publishes a new "message-" every second val producer = Source.tick(1.second, 1.second, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b") @@ -124,11 +124,11 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Print out messages from the producer in two independent consumers fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.runForeach(msg => println("consumer2: " + msg)) - //#partition-hub + // #partition-hub } "demonstrate creating a dynamic stateful partition hub" in compileOnlySpec { - //#partition-hub-stateful + // #partition-hub-stateful // A simple producer that publishes a new "message-" every second val producer = Source.tick(1.second, 1.second, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b") @@ -158,11 +158,11 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Print out messages from the producer in two independent consumers fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.runForeach(msg => println("consumer2: " + msg)) - //#partition-hub-stateful + // #partition-hub-stateful } "demonstrate creating a dynamic partition hub routing to fastest consumer" in compileOnlySpec { - //#partition-hub-fastest + // #partition-hub-fastest val producer = Source(0 until 100) // ConsumerInfo.queueSize is the approximate number of buffered elements for a consumer. @@ -178,7 +178,7 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.throttle(10, 100.millis).runForeach(msg => println("consumer2: " + msg)) - //#partition-hub-fastest + // #partition-hub-fastest } } diff --git a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala index 5ac053395a..a8cf6675ee 100644 --- a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala @@ -42,21 +42,21 @@ object IntegrationDocSpec { """) class AddressSystem { - //#email-address-lookup + // #email-address-lookup def lookupEmail(handle: String): Future[Option[String]] = - //#email-address-lookup + // #email-address-lookup Future.successful(Some(handle + "@somewhere.com")) - //#phone-lookup + // #phone-lookup def lookupPhoneNumber(handle: String): Future[Option[String]] = - //#phone-lookup + // #phone-lookup Future.successful(Some(handle.hashCode.toString)) } class AddressSystem2 { - //#email-address-lookup2 + // #email-address-lookup2 def lookupEmail(handle: String): Future[String] = - //#email-address-lookup2 + // #email-address-lookup2 Future.successful(handle + "@somewhere.com") } @@ -64,26 +64,26 @@ object IntegrationDocSpec { final case class TextMessage(to: String, body: String) class EmailServer(probe: ActorRef) { - //#email-server-send + // #email-server-send def send(email: Email): Future[Unit] = { // ... 
- //#email-server-send + // #email-server-send probe ! email.to Future.successful(()) - //#email-server-send + // #email-server-send } - //#email-server-send + // #email-server-send } class SmsServer(probe: ActorRef) { - //#sms-server-send + // #sms-server-send def send(text: TextMessage): Unit = { // ... - //#sms-server-send + // #sms-server-send probe ! text.to - //#sms-server-send + // #sms-server-send } - //#sms-server-send + // #sms-server-send } final case class Save(tweet: Tweet) @@ -97,11 +97,11 @@ object IntegrationDocSpec { } } - //#sometimes-slow-service + // #sometimes-slow-service class SometimesSlowService(implicit ec: ExecutionContext) { - //#sometimes-slow-service + // #sometimes-slow-service def println(s: String): Unit = () - //#sometimes-slow-service + // #sometimes-slow-service private val runningCount = new AtomicInteger @@ -117,9 +117,9 @@ object IntegrationDocSpec { } } } - //#sometimes-slow-service + // #sometimes-slow-service - //#ask-actor + // #ask-actor class Translator extends Actor { def receive = { case word: String => @@ -128,7 +128,7 @@ object IntegrationDocSpec { sender() ! reply // reply to the ask } } - //#ask-actor + // #ask-actor } @@ -139,7 +139,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val ref: ActorRef = system.actorOf(Props[Translator]()) "ask" in { - //#ask + // #ask implicit val askTimeout: Timeout = 5.seconds val words: Source[String, NotUsed] = Source(List("hello", "hi")) @@ -149,7 +149,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { // continue processing of the replies from the actor .map(_.toLowerCase) .runWith(Sink.ignore) - //#ask + // #ask } "calling external service with mapAsync" in { @@ -157,19 +157,19 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val addressSystem = new AddressSystem val emailServer = new EmailServer(probe.ref) - //#tweet-authors + // #tweet-authors val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) - //#tweet-authors + // #tweet-authors - //#email-addresses-mapAsync + // #email-addresses-mapAsync val emailAddresses: Source[String, NotUsed] = authors.mapAsync(4)(author => addressSystem.lookupEmail(author.handle)).collect { case Some(emailAddress) => emailAddress } - //#email-addresses-mapAsync + // #email-addresses-mapAsync - //#send-emails + // #send-emails val sendEmails: RunnableGraph[NotUsed] = emailAddresses .mapAsync(4)(address => { @@ -178,7 +178,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.ignore) sendEmails.run() - //#send-emails + // #send-emails probe.expectMsg("rolandkuhn@somewhere.com") probe.expectMsg("patriknw@somewhere.com") @@ -190,7 +190,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { } "actorRefWithBackpressure" in { - //#actorRefWithBackpressure + // #actorRefWithBackpressure val words: Source[String, NotUsed] = Source(List("hello", "hi")) @@ -217,10 +217,10 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { probe.expectMsg("hello") probe.expectMsg("hi") probe.expectMsg("Stream completed!") - //#actorRefWithBackpressure + // #actorRefWithBackpressure } - //#actorRefWithBackpressure-actor + // #actorRefWithBackpressure-actor object AckingReceiver { case object Ack @@ -250,14 +250,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { log.error(ex, "Stream failed!") } } - //#actorRefWithBackpressure-actor + // #actorRefWithBackpressure-actor "lookup email 
with mapAsync and supervision" in { val addressSystem = new AddressSystem2 val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) - //#email-addresses-mapAsync-supervision + // #email-addresses-mapAsync-supervision import ActorAttributes.supervisionStrategy import Supervision.resumingDecider @@ -266,7 +266,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { Flow[Author] .mapAsync(4)(author => addressSystem.lookupEmail(author.handle)) .withAttributes(supervisionStrategy(resumingDecider))) - //#email-addresses-mapAsync-supervision + // #email-addresses-mapAsync-supervision } "calling external service with mapAsyncUnordered" in { @@ -274,7 +274,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val addressSystem = new AddressSystem val emailServer = new EmailServer(probe.ref) - //#external-service-mapAsyncUnordered + // #external-service-mapAsyncUnordered val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) @@ -291,7 +291,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.ignore) sendEmails.run() - //#external-service-mapAsyncUnordered + // #external-service-mapAsyncUnordered probe.receiveN(7).toSet should be( Set( @@ -316,7 +316,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { case Some(phoneNo) => phoneNo } - //#blocking-mapAsync + // #blocking-mapAsync val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val sendTextMessages: RunnableGraph[NotUsed] = @@ -329,7 +329,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.ignore) sendTextMessages.run() - //#blocking-mapAsync + // #blocking-mapAsync probe.receiveN(7).toSet should be( Set( @@ -354,7 +354,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { case Some(phoneNo) => phoneNo } - //#blocking-map + // #blocking-map val send = Flow[String] .map { phoneNo => smsServer.send(TextMessage(to = phoneNo, body = "I like your tweet")) @@ -364,7 +364,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { phoneNumbers.via(send).to(Sink.ignore) sendTextMessages.run() - //#blocking-map + // #blocking-map probe.expectMsg("rolandkuhn".hashCode.toString) probe.expectMsg("patriknw".hashCode.toString) @@ -379,7 +379,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val probe = TestProbe() val database = system.actorOf(Props(classOf[DatabaseService], probe.ref), "db") - //#save-tweets + // #save-tweets import akka.pattern.ask val akkaTweets: Source[Tweet, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)) @@ -387,7 +387,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { implicit val timeout: Timeout = 3.seconds val saveTweets: RunnableGraph[NotUsed] = akkaTweets.mapAsync(4)(tweet => database ? Save(tweet)).to(Sink.ignore) - //#save-tweets + // #save-tweets saveTweets.run() @@ -407,7 +407,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { probe.ref ! 
s } - //#sometimes-slow-mapAsync + // #sometimes-slow-mapAsync implicit val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val service = new SometimesSlowService @@ -417,7 +417,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.foreach(elem => println(s"after: $elem"))) .withAttributes(Attributes.inputBuffer(initial = 4, max = 4)) .run() - //#sometimes-slow-mapAsync + // #sometimes-slow-mapAsync probe.expectMsg("after: A") probe.expectMsg("after: B") @@ -438,7 +438,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { probe.ref ! s } - //#sometimes-slow-mapAsyncUnordered + // #sometimes-slow-mapAsyncUnordered implicit val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val service = new SometimesSlowService @@ -448,7 +448,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.foreach(elem => println(s"after: $elem"))) .withAttributes(Attributes.inputBuffer(initial = 4, max = 4)) .run() - //#sometimes-slow-mapAsyncUnordered + // #sometimes-slow-mapAsyncUnordered probe.receiveN(10).toSet should be( Set( @@ -465,7 +465,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { } "illustrate use of source queue" in { - //#source-queue + // #source-queue val bufferSize = 10 val elementsToProcess = 5 @@ -489,14 +489,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { } }) .runWith(Sink.ignore) - //#source-queue + // #source-queue } "illustrate use of synchronous source queue" in { - //#source-queue-synchronous + // #source-queue-synchronous val bufferSize = 1000 - //#source-queue-synchronous + // #source-queue-synchronous // format: OFF //#source-queue-synchronous val queue = Source diff --git a/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala b/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala index 0cd30be31f..5e4ab7096a 100644 --- a/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala +++ b/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala @@ -12,23 +12,23 @@ class MigrationsScala extends AkkaSpec { "Examples in migration guide" must { "compile" in { lazy val dontExecuteMe = { - //#expand-continually + // #expand-continually Flow[Int].expand(Iterator.continually(_)) - //#expand-continually - //#expand-state + // #expand-continually + // #expand-state Flow[Int].expand(i => { var state = 0 - Iterator.continually({ + Iterator.continually { state += 1 (i, state) - }) + } }) - //#expand-state + // #expand-state - //#async + // #async val flow = Flow[Int].map(_ + 1) Source(1 to 10).via(flow.async) - //#async + // #async } } } diff --git a/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala index 0535217593..27c98e5175 100644 --- a/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala @@ -37,48 +37,48 @@ class QuickStartDocSpec extends AnyWordSpec with BeforeAndAfterAll with ScalaFut "demonstrate Source" in { implicit val system = ActorSystem("QuickStart") - //#create-source + // #create-source val source: Source[Int, NotUsed] = Source(1 to 100) - //#create-source + // #create-source - //#run-source + // #run-source source.runForeach(i => println(i)) - //#run-source + // #run-source - //#transform-source + // #transform-source val factorials = source.scan(BigInt(1))((acc, next) => acc * next) val result: Future[IOResult] = factorials.map(num 
=> ByteString(s"$num\n")).runWith(FileIO.toPath(Paths.get("factorials.txt"))) - //#transform-source + // #transform-source - //#use-transformed-sink + // #use-transformed-sink factorials.map(_.toString).runWith(lineSink("factorial2.txt")) - //#use-transformed-sink + // #use-transformed-sink - //#add-streams + // #add-streams factorials .zipWith(Source(0 to 100))((num, idx) => s"$idx! = $num") .throttle(1, 1.second) - //#add-streams + // #add-streams .take(3) - //#add-streams + // #add-streams .runForeach(println) - //#add-streams + // #add-streams - //#run-source-and-terminate + // #run-source-and-terminate val done: Future[Done] = source.runForeach(i => println(i)) implicit val ec = system.dispatcher done.onComplete(_ => system.terminate()) - //#run-source-and-terminate + // #run-source-and-terminate done.futureValue } - //#transform-sink + // #transform-sink def lineSink(filename: String): Sink[String, Future[IOResult]] = Flow[String].map(s => ByteString(s + "\n")).toMat(FileIO.toPath(Paths.get(filename)))(Keep.right) - //#transform-sink + // #transform-sink } diff --git a/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala index d6d6b06017..de5f2f66d5 100644 --- a/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala @@ -18,14 +18,14 @@ import scala.concurrent.Await class RateTransformationDocSpec extends AkkaSpec { "conflate should summarize" in { - //#conflate-summarize + // #conflate-summarize val statsFlow = Flow[Double].conflateWithSeed(immutable.Seq(_))(_ :+ _).map { s => val μ = s.sum / s.size val se = s.map(x => pow(x - μ, 2)) val σ = sqrt(se.sum / se.size) (σ, μ, s.size) } - //#conflate-summarize + // #conflate-summarize val fut = Source @@ -38,7 +38,7 @@ class RateTransformationDocSpec extends AkkaSpec { } "conflate should sample" in { - //#conflate-sample + // #conflate-sample val p = 0.01 val sampleFlow = Flow[Double] .conflateWithSeed(immutable.Seq(_)) { @@ -46,7 +46,7 @@ class RateTransformationDocSpec extends AkkaSpec { case (acc, _) => acc } .mapConcat(identity) - //#conflate-sample + // #conflate-sample val fut = Source(1 to 1000).map(_.toDouble).via(sampleFlow).runWith(Sink.fold(Seq.empty[Double])(_ :+ _)) @@ -54,9 +54,9 @@ class RateTransformationDocSpec extends AkkaSpec { } "extrapolate should repeat last" in { - //#extrapolate-last + // #extrapolate-last val lastFlow = Flow[Double].extrapolate(Iterator.continually(_)) - //#extrapolate-last + // #extrapolate-last val (probe, fut) = TestSource.probe[Double].via(lastFlow).grouped(10).toMat(Sink.head)(Keep.both).run() @@ -67,10 +67,10 @@ class RateTransformationDocSpec extends AkkaSpec { } "extrapolate should send seed first" in { - //#extrapolate-seed + // #extrapolate-seed val initial = 2.0 val seedFlow = Flow[Double].extrapolate(Iterator.continually(_), Some(initial)) - //#extrapolate-seed + // #extrapolate-seed val fut = TestSource.probe[Double].via(seedFlow).grouped(10).runWith(Sink.head) @@ -80,9 +80,9 @@ class RateTransformationDocSpec extends AkkaSpec { } "extrapolate should track drift" in { - //#extrapolate-drift + // #extrapolate-drift val driftFlow = Flow[Double].map(_ -> 0).extrapolate[(Double, Int)] { case (i, _) => Iterator.from(1).map(i -> _) } - //#extrapolate-drift + // #extrapolate-drift val latch = TestLatch(2) val realDriftFlow = Flow[Double].map(d => { latch.countDown(); d -> 0; }).extrapolate[(Double, Int)] { case (d, _) 
=> latch.countDown(); Iterator.from(1).map(d -> _) @@ -103,9 +103,9 @@ class RateTransformationDocSpec extends AkkaSpec { } "expand should track drift" in { - //#expand-drift + // #expand-drift val driftFlow = Flow[Double].expand(i => Iterator.from(0).map(i -> _)) - //#expand-drift + // #expand-drift val latch = TestLatch(2) val realDriftFlow = Flow[Double].expand(d => { latch.countDown(); Iterator.from(0).map(d -> _) }) diff --git a/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala index ca28fd5fa7..44e8e6c65e 100644 --- a/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala @@ -12,29 +12,29 @@ import akka.testkit.AkkaSpec class ReactiveStreamsDocSpec extends AkkaSpec { import TwitterStreamQuickstartDocSpec._ - //#imports + // #imports import org.reactivestreams.Publisher import org.reactivestreams.Subscriber import org.reactivestreams.Processor - //#imports + // #imports trait Fixture { - //#authors + // #authors val authors = Flow[Tweet].filter(_.hashtags.contains(akkaTag)).map(_.author) - //#authors + // #authors - //#tweets-publisher + // #tweets-publisher def tweets: Publisher[Tweet] - //#tweets-publisher + // #tweets-publisher - //#author-storage-subscriber + // #author-storage-subscriber def storage: Subscriber[Author] - //#author-storage-subscriber + // #author-storage-subscriber - //#author-alert-subscriber + // #author-alert-subscriber def alert: Subscriber[Author] - //#author-alert-subscriber + // #author-alert-subscriber } val impl = new Fixture { @@ -63,9 +63,9 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - //#connect-all + // #connect-all Source.fromPublisher(tweets).via(authors).to(Sink.fromSubscriber(storage)).run() - //#connect-all + // #connect-all assertResult(storage) } @@ -74,12 +74,12 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - //#flow-publisher-subscriber + // #flow-publisher-subscriber val processor: Processor[Tweet, Author] = authors.toProcessor.run() tweets.subscribe(processor) processor.subscribe(storage) - //#flow-publisher-subscriber + // #flow-publisher-subscriber assertResult(storage) } @@ -88,12 +88,12 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - //#source-publisher + // #source-publisher val authorPublisher: Publisher[Author] = Source.fromPublisher(tweets).via(authors).runWith(Sink.asPublisher(fanout = false)) authorPublisher.subscribe(storage) - //#source-publisher + // #source-publisher assertResult(storage) } @@ -103,13 +103,13 @@ class ReactiveStreamsDocSpec extends AkkaSpec { val storage = impl.storage val alert = impl.alert - //#source-fanoutPublisher + // #source-fanoutPublisher val authorPublisher: Publisher[Author] = Source.fromPublisher(tweets).via(authors).runWith(Sink.asPublisher(fanout = true)) authorPublisher.subscribe(storage) authorPublisher.subscribe(alert) - //#source-fanoutPublisher + // #source-fanoutPublisher // this relies on fanoutPublisher buffer size > number of authors assertResult(storage) @@ -120,24 +120,24 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - //#sink-subscriber + // #sink-subscriber val tweetSubscriber: Subscriber[Tweet] = authors.to(Sink.fromSubscriber(storage)).runWith(Source.asSubscriber[Tweet]) tweets.subscribe(tweetSubscriber) - //#sink-subscriber + // 
#sink-subscriber assertResult(storage) } "use a processor" in { - //#use-processor + // #use-processor // An example Processor factory def createProcessor: Processor[Int, Int] = Flow[Int].toProcessor.run() val flow: Flow[Int, Int, NotUsed] = Flow.fromProcessor(() => createProcessor) - //#use-processor + // #use-processor } diff --git a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala index 41129da2fb..51f589ccca 100644 --- a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala @@ -33,7 +33,7 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec { "demonstrate a restart with backoff source" in compileOnlySpec { - //#restart-with-backoff-source + // #restart-with-backoff-source val settings = RestartSettings( minBackoff = 3.seconds, maxBackoff = 30.seconds, @@ -50,9 +50,9 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec { .flatMap(Unmarshal(_).to[Source[ServerSentEvent, NotUsed]]) } } - //#restart-with-backoff-source + // #restart-with-backoff-source - //#with-kill-switch + // #with-kill-switch val killSwitch = restartSource .viaMat(KillSwitches.single)(Keep.right) .toMat(Sink.foreach(event => println(s"Got event: $event")))(Keep.left) @@ -61,7 +61,7 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec { doSomethingElse() killSwitch.shutdown() - //#with-kill-switch + // #with-kill-switch } } diff --git a/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala b/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala index af786d29f8..0e6a2aecf3 100644 --- a/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala @@ -13,11 +13,11 @@ class SinkRecipeDocSpec extends RecipeSpec { "Sink.foreachAsync" must { "processing each element asynchronously" in { def asyncProcessing(value: Int): Future[Unit] = Future { println(value) }(system.dispatcher) - //#forseachAsync-processing - //def asyncProcessing(value: Int): Future[Unit] = _ + // #forseachAsync-processing + // def asyncProcessing(value: Int): Future[Unit] = _ Source(1 to 100).runWith(Sink.foreachAsync(10)(asyncProcessing)) - //#forseachAsync-processing + // #forseachAsync-processing } } } diff --git a/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala index 5f1a546252..c064ec3293 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala @@ -13,7 +13,7 @@ class StreamBuffersRateSpec extends AkkaSpec { "Demonstrate pipelining" in { def println(s: Any) = () - //#pipelining + // #pipelining Source(1 to 3) .map { i => println(s"A: $i"); i @@ -28,22 +28,22 @@ class StreamBuffersRateSpec extends AkkaSpec { } .async .runWith(Sink.ignore) - //#pipelining + // #pipelining } "Demonstrate buffer sizes" in { - //#section-buffer + // #section-buffer val section = Flow[Int].map(_ * 2).async.addAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // the buffer size of this map is 1 val flow = section.via(Flow[Int].map(_ / 2)).async // the buffer size of this map is the default val runnableGraph = Source(1 to 10).via(flow).to(Sink.foreach(elem => println(elem))) val withOverriddenDefaults = runnableGraph.withAttributes(Attributes.inputBuffer(initial = 64, max = 64)) - //#section-buffer + // #section-buffer } "buffering abstraction leak" in { - 
//#buffering-abstraction-leak + // #buffering-abstraction-leak import scala.concurrent.duration._ case class Tick() @@ -57,42 +57,42 @@ class StreamBuffersRateSpec extends AkkaSpec { Source .tick(initialDelay = 1.second, interval = 1.second, "message!") - .conflateWithSeed(seed = (_) => 1)((count, _) => count + 1) ~> zipper.in1 + .conflateWithSeed(seed = _ => 1)((count, _) => count + 1) ~> zipper.in1 zipper.out ~> Sink.foreach(println) ClosedShape }) - //#buffering-abstraction-leak + // #buffering-abstraction-leak } "explicit buffers" in { trait Job def inboundJobsConnector(): Source[Job, NotUsed] = Source.empty - //#explicit-buffers-backpressure + // #explicit-buffers-backpressure // Getting a stream of jobs from an imaginary external system as a Source val jobs: Source[Job, NotUsed] = inboundJobsConnector() jobs.buffer(1000, OverflowStrategy.backpressure) - //#explicit-buffers-backpressure + // #explicit-buffers-backpressure - //#explicit-buffers-droptail + // #explicit-buffers-droptail jobs.buffer(1000, OverflowStrategy.dropTail) - //#explicit-buffers-droptail + // #explicit-buffers-droptail - //#explicit-buffers-dropnew + // #explicit-buffers-dropnew jobs.buffer(1000, OverflowStrategy.dropNew) - //#explicit-buffers-dropnew + // #explicit-buffers-dropnew - //#explicit-buffers-drophead + // #explicit-buffers-drophead jobs.buffer(1000, OverflowStrategy.dropHead) - //#explicit-buffers-drophead + // #explicit-buffers-drophead - //#explicit-buffers-dropbuffer + // #explicit-buffers-dropbuffer jobs.buffer(1000, OverflowStrategy.dropBuffer) - //#explicit-buffers-dropbuffer + // #explicit-buffers-dropbuffer - //#explicit-buffers-fail + // #explicit-buffers-fail jobs.buffer(1000, OverflowStrategy.fail) - //#explicit-buffers-fail + // #explicit-buffers-fail } diff --git a/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala index 8c7a89e534..bd417b38ab 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala @@ -18,7 +18,7 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { implicit val ec: ExecutionContext = system.dispatcher "build with open ports" in { - //#simple-partial-graph-dsl + // #simple-partial-graph-dsl val pickMaxOfThree = GraphDSL.create() { implicit b => import GraphDSL.Implicits._ @@ -40,17 +40,17 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { Source.single(1) ~> pm3.in(0) Source.single(2) ~> pm3.in(1) Source.single(3) ~> pm3.in(2) - pm3.out ~> sink.in + pm3.out ~> sink.in ClosedShape }) val max: Future[Int] = g.run() Await.result(max, 300.millis) should equal(3) - //#simple-partial-graph-dsl + // #simple-partial-graph-dsl } "build source from partial graph" in { - //#source-from-partial-graph-dsl + // #source-from-partial-graph-dsl val pairs = Source.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ @@ -67,12 +67,12 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { }) val firstPair: Future[(Int, Int)] = pairs.runWith(Sink.head) - //#source-from-partial-graph-dsl + // #source-from-partial-graph-dsl Await.result(firstPair, 300.millis) should equal(1 -> 2) } "build flow from partial graph" in { - //#flow-from-partial-graph-dsl + // #flow-from-partial-graph-dsl val pairUpWithToString = Flow.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ @@ -82,14 +82,14 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { val zip = 
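Aside on the conflate hunks above: conflation only becomes observable when downstream is slower than upstream. A minimal sketch that makes this visible by throttling the consumer (illustrative numbers, standard `conflate`/`throttle` operators):

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }
import scala.concurrent.duration._

object ConflateSketch extends App {
  implicit val system: ActorSystem = ActorSystem("conflate-sketch")

  // A fast producer against a throttled consumer: conflate sums up elements
  // that arrive while downstream is busy, so backpressure never reaches the source.
  Source(1 to 1000)
    .conflate(_ + _)         // combine pending elements while downstream is busy
    .throttle(1, 100.millis) // simulate a slow consumer
    .take(5)
    .runForeach(println)
    .onComplete(_ => system.terminate())(system.dispatcher)
}
```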
b.add(Zip[Int, String]()) // connect the graph - broadcast.out(0).map(identity) ~> zip.in0 + broadcast.out(0).map(identity) ~> zip.in0 broadcast.out(1).map(_.toString) ~> zip.in1 // expose ports FlowShape(broadcast.in, zip.out) }) - //#flow-from-partial-graph-dsl + // #flow-from-partial-graph-dsl // format: OFF val (_, matSink: Future[(Int, String)]) = @@ -102,26 +102,26 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { } "combine sources with simplified API" in { - //#source-combine + // #source-combine val sourceOne = Source(List(1)) val sourceTwo = Source(List(2)) val merged = Source.combine(sourceOne, sourceTwo)(Merge(_)) val mergedResult: Future[Int] = merged.runWith(Sink.fold(0)(_ + _)) - //#source-combine + // #source-combine Await.result(mergedResult, 300.millis) should equal(3) } "combine sinks with simplified API" in { val actorRef: ActorRef = testActor - //#sink-combine + // #sink-combine val sendRemotely = Sink.actorRef(actorRef, "Done", _ => "Failed") val localProcessing = Sink.foreach[Int](_ => /* do something useful */ ()) val sink = Sink.combine(sendRemotely, localProcessing)(Broadcast[Int](_)) Source(List(0, 1, 2)).runWith(sink) - //#sink-combine + // #sink-combine expectMsg(0) expectMsg(1) expectMsg(2) diff --git a/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala index c877138ac3..d66a5d29c1 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala @@ -18,18 +18,18 @@ import akka.pattern class StreamTestKitDocSpec extends AkkaSpec { "strict collection" in { - //#strict-collection + // #strict-collection val sinkUnderTest = Flow[Int].map(_ * 2).toMat(Sink.fold(0)(_ + _))(Keep.right) val future = Source(1 to 4).runWith(sinkUnderTest) val result = Await.result(future, 3.seconds) assert(result == 20) - //#strict-collection + // #strict-collection } "grouped part of infinite stream" in { - //#grouped-infinite + // #grouped-infinite import system.dispatcher import akka.pattern.pipe @@ -38,21 +38,21 @@ class StreamTestKitDocSpec extends AkkaSpec { val future = sourceUnderTest.take(10).runWith(Sink.seq) val result = Await.result(future, 3.seconds) assert(result == Seq.fill(10)(2)) - //#grouped-infinite + // #grouped-infinite } "folded stream" in { - //#folded-stream + // #folded-stream val flowUnderTest = Flow[Int].takeWhile(_ < 5) val future = Source(1 to 10).via(flowUnderTest).runWith(Sink.fold(Seq.empty[Int])(_ :+ _)) val result = Await.result(future, 3.seconds) assert(result == (1 to 4)) - //#folded-stream + // #folded-stream } "pipe to test probe" in { - //#pipeto-testprobe + // #pipeto-testprobe import system.dispatcher import akka.pattern.pipe @@ -61,11 +61,11 @@ class StreamTestKitDocSpec extends AkkaSpec { val probe = TestProbe() sourceUnderTest.runWith(Sink.seq).pipeTo(probe.ref) probe.expectMsg(3.seconds, Seq(Seq(1, 2), Seq(3, 4))) - //#pipeto-testprobe + // #pipeto-testprobe } "sink actor ref" in { - //#sink-actorref + // #sink-actorref case object Tick val sourceUnderTest = Source.tick(0.seconds, 200.millis, Tick) @@ -79,11 +79,11 @@ class StreamTestKitDocSpec extends AkkaSpec { probe.expectMsg(3.seconds, Tick) cancellable.cancel() probe.expectMsg(3.seconds, "completed") - //#sink-actorref + // #sink-actorref } "source actor ref" in { - //#source-actorref + // #source-actorref val sinkUnderTest = Flow[Int].map(_.toString).toMat(Sink.fold("")(_ + _))(Keep.right) val (ref, future) = Source @@ 
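The `Source.combine`/`Sink.combine` hunks above use two inputs with `Merge`; the same API takes any number of inputs, and swapping the strategy changes the semantics. A sketch with three sources and `Concat`, which drains them one after the other instead of merging:

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Concat, Sink, Source }

object CombineSketch extends App {
  implicit val system: ActorSystem = ActorSystem("combine-sketch")

  val s1: Source[Int, NotUsed] = Source(1 to 2)
  val s2: Source[Int, NotUsed] = Source(3 to 4)
  val s3: Source[Int, NotUsed] = Source(5 to 6)

  // Concat(_) receives the number of inputs and drains them in order.
  Source.combine(s1, s2, s3)(Concat(_))
    .runWith(Sink.seq)
    .foreach { seq => println(seq); system.terminate() }(system.dispatcher) // Vector(1..6)
}
```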
-106,39 +106,39 @@ class StreamTestKitDocSpec extends AkkaSpec { val result = Await.result(future, 3.seconds) assert(result == "123") - //#source-actorref + // #source-actorref } "test sink probe" in { - //#test-sink-probe + // #test-sink-probe val sourceUnderTest = Source(1 to 4).filter(_ % 2 == 0).map(_ * 2) sourceUnderTest.runWith(TestSink[Int]()).request(2).expectNext(4, 8).expectComplete() - //#test-sink-probe + // #test-sink-probe } "test source probe" in { - //#test-source-probe + // #test-source-probe val sinkUnderTest = Sink.cancelled TestSource.probe[Int].toMat(sinkUnderTest)(Keep.left).run().expectCancellation() - //#test-source-probe + // #test-source-probe } "injecting failure" in { - //#injecting-failure + // #injecting-failure val sinkUnderTest = Sink.head[Int] val (probe, future) = TestSource.probe[Int].toMat(sinkUnderTest)(Keep.both).run() probe.sendError(new Exception("boom")) assert(future.failed.futureValue.getMessage == "boom") - //#injecting-failure + // #injecting-failure } "test source and a sink" in { import system.dispatcher - //#test-source-and-sink + // #test-source-and-sink val flowUnderTest = Flow[Int].mapAsyncUnordered(2) { sleep => pattern.after(10.millis * sleep, using = system.scheduler)(Future.successful(sleep)) } @@ -154,7 +154,7 @@ class StreamTestKitDocSpec extends AkkaSpec { pub.sendError(new Exception("Power surge in the linear subroutine C-47!")) val ex = sub.expectError() assert(ex.getMessage.contains("C-47")) - //#test-source-and-sink + // #test-source-and-sink } } diff --git a/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala b/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala index 3097d488c2..f2fe669340 100644 --- a/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala @@ -5,40 +5,40 @@ package docs.stream import akka.stream.scaladsl.{ Sink, Source } -import akka.stream.{ SubstreamCancelStrategy } +import akka.stream.SubstreamCancelStrategy import akka.testkit.AkkaSpec class SubstreamDocSpec extends AkkaSpec { "generate substreams by groupBy" in { - //#groupBy1 + // #groupBy1 val source = Source(1 to 10).groupBy(3, _ % 3) - //#groupBy1 + // #groupBy1 - //#groupBy2 + // #groupBy2 Source(1 to 10).groupBy(3, _ % 3).to(Sink.ignore).run() - //#groupBy2 + // #groupBy2 - //#groupBy3 + // #groupBy3 Source(1 to 10).groupBy(3, _ % 3).mergeSubstreams.runWith(Sink.ignore) - //#groupBy3 + // #groupBy3 - //#groupBy4 + // #groupBy4 Source(1 to 10).groupBy(3, _ % 3).mergeSubstreamsWithParallelism(2).runWith(Sink.ignore) - //concatSubstreams is equivalent to mergeSubstreamsWithParallelism(1) + // concatSubstreams is equivalent to mergeSubstreamsWithParallelism(1) Source(1 to 10).groupBy(3, _ % 3).concatSubstreams.runWith(Sink.ignore) - //#groupBy4 + // #groupBy4 } "generate substreams by splitWhen and splitAfter" in { - //#splitWhenAfter + // #splitWhenAfter Source(1 to 10).splitWhen(SubstreamCancelStrategy.drain)(_ == 3) Source(1 to 10).splitAfter(SubstreamCancelStrategy.drain)(_ == 3) - //#splitWhenAfter + // #splitWhenAfter - //#wordCount + // #wordCount val text = "This is the first line.\n" + "The second line.\n" + @@ -51,16 +51,16 @@ class SubstreamDocSpec extends AkkaSpec { .reduce(_ + _) .to(Sink.foreach(println)) .run() - //#wordCount + // #wordCount } "generate substreams by flatMapConcat and flatMapMerge" in { - //#flatMapConcat + // #flatMapConcat Source(1 to 2).flatMapConcat(i => Source(List.fill(3)(i))).runWith(Sink.ignore) - //#flatMapConcat + // 
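For orientation in the StreamTestKitDocSpec hunks above: the probes compose, so a flow can be driven from both ends at once. A minimal sketch using the same `TestSource.probe`/`TestSink[T]()` forms that appear in the diff:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Keep }
import akka.stream.testkit.scaladsl.{ TestSink, TestSource }

object ProbeSketch extends App {
  implicit val system: ActorSystem = ActorSystem("probe-sketch")

  val flowUnderTest = Flow[Int].map(_ * 2)

  // Publisher probe upstream, subscriber probe downstream.
  val (pub, sub) = TestSource.probe[Int]
    .via(flowUnderTest)
    .toMat(TestSink[Int]())(Keep.both)
    .run()

  sub.request(2)
  pub.sendNext(1)
  pub.sendNext(2)
  sub.expectNext(2, 4)
  pub.sendComplete()
  sub.expectComplete()
  system.terminate()
}
```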
#flatMapConcat - //#flatMapMerge + // #flatMapMerge Source(1 to 2).flatMapMerge(2, i => Source(List.fill(3)(i))).runWith(Sink.ignore) - //#flatMapMerge + // #flatMapMerge } } diff --git a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala index 35d678b34f..16425b01d9 100644 --- a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala @@ -20,12 +20,12 @@ import akka.testkit.AkkaSpec import scala.concurrent.ExecutionContext object TwitterStreamQuickstartDocSpec { - //#fiddle_code + // #fiddle_code import akka.NotUsed import akka.actor.ActorSystem import akka.stream.scaladsl._ - //#model + // #model final case class Author(handle: String) final case class Hashtag(name: String) @@ -41,17 +41,17 @@ object TwitterStreamQuickstartDocSpec { } val akkaTag = Hashtag("#akka") - //#model + // #model - //#fiddle_code + // #fiddle_code abstract class TweetSourceDecl { - //#tweet-source + // #tweet-source val tweets: Source[Tweet, NotUsed] - //#tweet-source + // #tweet-source } - //#fiddle_code + // #fiddle_code val tweets: Source[Tweet, NotUsed] = Source( Tweet(Author("rolandkuhn"), System.currentTimeMillis, "#akka rocks!") :: Tweet(Author("patriknw"), System.currentTimeMillis, "#akka !") :: @@ -65,7 +65,7 @@ object TwitterStreamQuickstartDocSpec { Tweet(Author("drama"), System.currentTimeMillis, "we compared #apples to #oranges!") :: Nil) - //#fiddle_code + // #fiddle_code } class TwitterStreamQuickstartDocSpec extends AkkaSpec { @@ -77,55 +77,55 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { def println(s: Any): Unit = () trait Example1 { - //#fiddle_code - //#first-sample - //#system-setup + // #fiddle_code + // #first-sample + // #system-setup implicit val system: ActorSystem = ActorSystem("reactive-tweets") - //#system-setup - //#first-sample + // #system-setup + // #first-sample - //#fiddle_code + // #fiddle_code } "filter and map" in { - //#first-sample + // #first-sample - //#authors-filter-map + // #authors-filter-map val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) - //#first-sample - //#authors-filter-map + // #first-sample + // #authors-filter-map trait Example3 { - //#authors-collect + // #authors-collect val authors: Source[Author, NotUsed] = tweets.collect { case t if t.hashtags.contains(akkaTag) => t.author } - //#authors-collect + // #authors-collect } - //#first-sample + // #first-sample - //#authors-foreachsink-println + // #authors-foreachsink-println authors.runWith(Sink.foreach(println)) - //#authors-foreachsink-println - //#first-sample + // #authors-foreachsink-println + // #first-sample - //#authors-foreach-println + // #authors-foreach-println authors.runForeach(println) - //#authors-foreach-println + // #authors-foreach-println } "mapConcat hashtags" in { - //#hashtags-mapConcat + // #hashtags-mapConcat val hashtags: Source[Hashtag, NotUsed] = tweets.mapConcat(_.hashtags.toList) - //#hashtags-mapConcat + // #hashtags-mapConcat } trait HiddenDefinitions { - //#graph-dsl-broadcast + // #graph-dsl-broadcast val writeAuthors: Sink[Author, NotUsed] = ??? val writeHashtags: Sink[Hashtag, NotUsed] = ??? 
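The flatMapConcat/flatMapMerge hunks above only run the streams; the behavioral difference is ordering. A sketch asserting that `flatMapConcat` drains one substream fully before the next (with `flatMapMerge` the substreams may interleave):

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }

object FlattenSketch extends App {
  implicit val system: ActorSystem = ActorSystem("flatten-sketch")
  import system.dispatcher

  Source(1 to 2)
    .flatMapConcat(i => Source(List.fill(3)(i)))
    .runWith(Sink.seq)
    .foreach { seq =>
      assert(seq == Seq(1, 1, 1, 2, 2, 2)) // always in order for flatMapConcat
      system.terminate()
    }
}
```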
- //#graph-dsl-broadcast + // #graph-dsl-broadcast } "simple broadcast" in { @@ -150,7 +150,7 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { "simple fiddle showcase" in { - //#fiddle_code + // #fiddle_code tweets .filterNot(_.hashtags.contains(akkaTag)) // Remove all tweets containing #akka hashtag .map(_.hashtags) // Get all sets of hashtags ... @@ -158,7 +158,7 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { .mapConcat(identity) // Flatten the set of hashtags to a stream of hashtags .map(_.name.toUpperCase) // Convert all hashtags to upper case .runWith(Sink.foreach(println)) // Attach the Flow to a Sink that will finally print the hashtags - //#fiddle_code + // #fiddle_code .value } @@ -168,28 +168,28 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { 42 } - //#tweets-slow-consumption-dropHead + // #tweets-slow-consumption-dropHead tweets.buffer(10, OverflowStrategy.dropHead).map(slowComputation).runWith(Sink.ignore) - //#tweets-slow-consumption-dropHead + // #tweets-slow-consumption-dropHead } "backpressure by readline" in { trait X { import scala.concurrent.duration._ - //#backpressure-by-readline + // #backpressure-by-readline val completion: Future[Done] = Source(1 to 10).map(i => { println(s"map => $i"); i }).runForeach { i => readLine(s"Element = $i; continue reading? [press enter]\n") } Await.ready(completion, 1.minute) - //#backpressure-by-readline + // #backpressure-by-readline } } "count elements on finite stream" in { - //#tweets-fold-count + // #tweets-fold-count val count: Flow[Tweet, Int, NotUsed] = Flow[Tweet].map(_ => 1) val sumSink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _) @@ -200,19 +200,19 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { val sum: Future[Int] = counterGraph.run() sum.foreach(c => println(s"Total tweets processed: $c")) - //#tweets-fold-count + // #tweets-fold-count new AnyRef { - //#tweets-fold-count-oneline + // #tweets-fold-count-oneline val sum: Future[Int] = tweets.map(t => 1).runWith(sumSink) - //#tweets-fold-count-oneline + // #tweets-fold-count-oneline } } "materialize multiple times" in { val tweetsInMinuteFromNow = tweets // not really in second, just acting as if - //#tweets-runnable-flow-materialized-twice + // #tweets-runnable-flow-materialized-twice val sumSink = Sink.fold[Int, Int](0)(_ + _) val counterRunnableGraph: RunnableGraph[Future[Int]] = tweetsInMinuteFromNow.filter(_.hashtags contains akkaTag).map(t => 1).toMat(sumSink)(Keep.right) @@ -222,7 +222,7 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { // and once in the evening, reusing the flow val eveningTweetsCount: Future[Int] = counterRunnableGraph.run() - //#tweets-runnable-flow-materialized-twice + // #tweets-runnable-flow-materialized-twice val sum: Future[Int] = counterRunnableGraph.run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala index 0bedc7cb10..d5dbfa642d 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala @@ -16,17 +16,17 @@ import scala.concurrent.duration._ class RecipeAdhocSource extends RecipeSpec { - //#adhoc-source + // #adhoc-source def adhocSource[T](source: Source[T, _], timeout: FiniteDuration, maxRetries: Int): Source[T, _] = - Source.lazySource( - () => - source - .backpressureTimeout(timeout) - .recoverWithRetries(maxRetries, { + Source.lazySource(() => + source + 
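The broadcast example above needs the GraphDSL because the two sinks consume different element types (authors vs. hashtags). When both branches take the same element type, `alsoTo` is a lighter alternative worth knowing; a sketch:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }

object AlsoToSketch extends App {
  implicit val system: ActorSystem = ActorSystem("alsoto-sketch")

  // Fan out to an extra sink without an explicit Broadcast stage.
  Source(1 to 3)
    .alsoTo(Sink.foreach(i => println(s"side: $i")))
    .runWith(Sink.foreach(i => println(s"main: $i")))
    .onComplete(_ => system.terminate())(system.dispatcher)
}
```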
.backpressureTimeout(timeout) + .recoverWithRetries(maxRetries, + { case t: TimeoutException => Source.lazySource(() => source.backpressureTimeout(timeout)).mapMaterializedValue(_ => NotUsed) })) - //#adhoc-source + // #adhoc-source "Recipe for adhoc source" must { "not start the source if there is no demand" taggedAs TimingTest in { @@ -45,8 +45,8 @@ class RecipeAdhocSource extends RecipeSpec { "shut down the source when the next demand times out" taggedAs TimingTest in { val shutdown = Promise[Done]() val sink = adhocSource(Source.repeat("a").watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + shutdown.completeWith(term) + }, 200.milliseconds, 3).runWith(TestSink[String]()) sink.requestNext("a") Thread.sleep(200) @@ -56,8 +56,8 @@ class RecipeAdhocSource extends RecipeSpec { "not shut down the source when there are still demands" taggedAs TimingTest in { val shutdown = Promise[Done]() val sink = adhocSource(Source.repeat("a").watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + shutdown.completeWith(term) + }, 200.milliseconds, 3).runWith(TestSink[String]()) sink.requestNext("a") Thread.sleep(100) @@ -80,8 +80,8 @@ class RecipeAdhocSource extends RecipeSpec { val source = Source.empty.mapMaterializedValue(_ => startedCount.incrementAndGet()).concat(Source.repeat("a")) val sink = adhocSource(source.watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + shutdown.completeWith(term) + }, 200.milliseconds, 3).runWith(TestSink[String]()) sink.requestNext("a") startedCount.get() should be(1) @@ -96,8 +96,8 @@ class RecipeAdhocSource extends RecipeSpec { val source = Source.empty.mapMaterializedValue(_ => startedCount.incrementAndGet()).concat(Source.repeat("a")) val sink = adhocSource(source.watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + shutdown.completeWith(term) + }, 200.milliseconds, 3).runWith(TestSink[String]()) sink.requestNext("a") startedCount.get() should be(1) @@ -115,12 +115,12 @@ class RecipeAdhocSource extends RecipeSpec { Thread.sleep(500) sink.requestNext("a") - startedCount.get() should be(4) //startCount == 4, which means "re"-tried 3 times + startedCount.get() should be(4) // startCount == 4, which means "re"-tried 3 times Thread.sleep(500) sink.expectError().getClass should be(classOf[TimeoutException]) - sink.request(1) //send demand - sink.expectNoMessage(200.milliseconds) //but no more restart + sink.request(1) // send demand + sink.expectNoMessage(200.milliseconds) // but no more restart } } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala index fdaf698b30..03b3387e2b 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala @@ -20,7 +20,7 @@ class RecipeByteStrings extends RecipeSpec { val rawBytes = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9))) val ChunkLimit = 2 - //#bytestring-chunker + // #bytestring-chunker import akka.stream.stage._ class Chunker(val chunkSize: Int) extends GraphStage[FlowShape[ByteString, ByteString]] { @@ -30,11 +30,12 @@ class RecipeByteStrings extends RecipeSpec { override def createLogic(inheritedAttributes: Attributes): 
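The adhoc-source recipe above leans on `recoverWithRetries` to swap in a fresh source after a timeout. In isolation the operator is simpler than the recipe suggests; a sketch where the fallback just terminates the stream gracefully (illustrative failure and fallback):

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }

object RecoverSketch extends App {
  implicit val system: ActorSystem = ActorSystem("recover-sketch")
  import system.dispatcher

  // Switch to a fallback source when the original fails, at most `attempts` times.
  Source(1 to 5)
    .map(i => if (i == 4) throw new RuntimeException("boom") else i)
    .recoverWithRetries(attempts = 1, { case _: RuntimeException => Source.single(-1) })
    .runWith(Sink.seq)
    .foreach { seq => println(seq); system.terminate() } // Vector(1, 2, 3, -1)
}
```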
GraphStageLogic = new GraphStageLogic(shape) { private var buffer = ByteString.empty - setHandler(out, new OutHandler { - override def onPull(): Unit = { - emitChunk() - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + emitChunk() + } + }) setHandler( in, new InHandler { @@ -72,7 +73,7 @@ class RecipeByteStrings extends RecipeSpec { } val chunksStream = rawBytes.via(new Chunker(ChunkLimit)) - //#bytestring-chunker + // #bytestring-chunker val chunksFuture = chunksStream.limit(10).runWith(Sink.seq) val chunks = Await.result(chunksFuture, 3.seconds) @@ -84,7 +85,7 @@ class RecipeByteStrings extends RecipeSpec { "have a working bytes limiter" in { val SizeLimit = 9 - //#bytes-limiter + // #bytes-limiter import akka.stream.stage._ class ByteLimiter(val maximumBytes: Long) extends GraphStage[FlowShape[ByteString, ByteString]] { val in = Inlet[ByteString]("ByteLimiter.in") @@ -94,24 +95,25 @@ class RecipeByteStrings extends RecipeSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var count = 0 - setHandlers(in, out, new InHandler with OutHandler { + setHandlers(in, out, + new InHandler with OutHandler { - override def onPull(): Unit = { - pull(in) - } + override def onPull(): Unit = { + pull(in) + } - override def onPush(): Unit = { - val chunk = grab(in) - count += chunk.size - if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes")) - else push(out, chunk) - } - }) + override def onPush(): Unit = { + val chunk = grab(in) + count += chunk.size + if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes")) + else push(out, chunk) + } + }) } } val limiter = Flow[ByteString].via(new ByteLimiter(SizeLimit)) - //#bytes-limiter + // #bytes-limiter val bytes1 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9))) val bytes2 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9, 10))) @@ -128,9 +130,9 @@ class RecipeByteStrings extends RecipeSpec { val data = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9))) - //#compacting-bytestrings + // #compacting-bytestrings val compacted: Source[ByteString, NotUsed] = data.map(_.compact) - //#compacting-bytestrings + // #compacting-bytestrings Await.result(compacted.limit(10).runWith(Sink.seq), 3.seconds).forall(_.isCompact) should be(true) } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala index 70fca1bd5f..8b28034175 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala @@ -14,16 +14,16 @@ import scala.concurrent.duration._ class RecipeDecompress extends RecipeSpec { "Recipe for decompressing a Gzip stream" must { "work" in { - //#decompress-gzip + // #decompress-gzip import akka.stream.scaladsl.Compression - //#decompress-gzip + // #decompress-gzip val compressed = Source.single(ByteString.fromString("Hello World")).via(Compression.gzip) - //#decompress-gzip + // #decompress-gzip val uncompressed = compressed.via(Compression.gunzip()).map(_.utf8String) - //#decompress-gzip + // #decompress-gzip Await.result(uncompressed.runWith(Sink.head), 3.seconds) should be("Hello World") } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala 
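The RecipeDecompress hunk above splits the gzip example across several snippet markers; stitched together, the whole round trip is a few lines. A self-contained sketch using the same `Compression.gzip`/`Compression.gunzip()` flows:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Compression, Sink, Source }
import akka.util.ByteString

object GzipSketch extends App {
  implicit val system: ActorSystem = ActorSystem("gzip-sketch")
  import system.dispatcher

  // Compress, decompress, then re-assemble the decompressed chunks.
  Source.single(ByteString("Hello World"))
    .via(Compression.gzip)
    .via(Compression.gunzip())
    .runWith(Sink.fold(ByteString.empty)(_ ++ _))
    .foreach { bytes => println(bytes.utf8String); system.terminate() } // Hello World
}
```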
b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala index 879d29e8bd..1c68ebd086 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala @@ -13,7 +13,7 @@ class RecipeDigest extends RecipeSpec { "work" in { - //#calculating-digest + // #calculating-digest import java.security.MessageDigest import akka.NotUsed @@ -33,31 +33,33 @@ class RecipeDigest extends RecipeSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private val digest = MessageDigest.getInstance(algorithm) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) - setHandler(in, new InHandler { - override def onPush(): Unit = { - val chunk = grab(in) - digest.update(chunk.toArray) - pull(in) - } + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val chunk = grab(in) + digest.update(chunk.toArray) + pull(in) + } - override def onUpstreamFinish(): Unit = { - emit(out, ByteString(digest.digest())) - completeStage() - } - }) + override def onUpstreamFinish(): Unit = { + emit(out, ByteString(digest.digest())) + completeStage() + } + }) } } val digest: Source[ByteString, NotUsed] = data.via(new DigestCalculator("SHA-256")) - //#calculating-digest + // #calculating-digest Await.result(digest.runWith(Sink.head), 3.seconds) should be( - ByteString(0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, - 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad)) + ByteString(0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01, 0xCF, 0xEA, 0x41, 0x41, 0x40, 0xDE, 0x5D, 0xAE, 0x22, 0x23, 0xB0, + 0x03, 0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C, 0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00, 0x15, 0xAD)) } } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala index 83a2a464ab..a3189e862f 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala @@ -23,7 +23,7 @@ class RecipeDroppyBroadcast extends RecipeSpec { val mySink2 = Sink.fromSubscriber(sub2) val mySink3 = Sink.fromSubscriber(sub3) - //#droppy-bcast + // #droppy-bcast val graph = RunnableGraph.fromGraph(GraphDSL.createGraph(mySink1, mySink2, mySink3)((_, _, _)) { implicit b => (sink1, sink2, sink3) => import GraphDSL.Implicits._ @@ -36,7 +36,7 @@ class RecipeDroppyBroadcast extends RecipeSpec { bcast.buffer(10, OverflowStrategy.dropHead) ~> sink3 ClosedShape }) - //#droppy-bcast + // #droppy-bcast graph.run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala index b3a8e5afb0..74168c1beb 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala @@ -18,10 +18,10 @@ class RecipeFlattenSeq extends RecipeSpec { val someDataSource = Source(List(List("1"), List("2"), List("3", "4", "5"), List("6", "7"))) - //#flattening-seqs + // #flattening-seqs val myData: Source[List[Message], NotUsed] = someDataSource val flattened: Source[Message, NotUsed] = myData.mapConcat(identity) - //#flattening-seqs + // #flattening-seqs 
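As a counterpoint to the `DigestCalculator` GraphStage in the RecipeDigest hunks: when only the final hash is needed (rather than a streamed digest element), folding a `MessageDigest` over the chunks avoids the custom stage entirely. A sketch under that assumption:

```scala
import java.security.MessageDigest
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.ByteString
import scala.concurrent.Future

object DigestSketch extends App {
  implicit val system: ActorSystem = ActorSystem("digest-sketch")
  import system.dispatcher

  val data = Source(List(ByteString("abc")))

  // Fold the mutable digest through the stream, then finalize it once.
  val digest: Future[ByteString] =
    data
      .runWith(Sink.fold(MessageDigest.getInstance("SHA-256")) { (d, chunk: ByteString) =>
        d.update(chunk.toArray); d
      })
      .map(d => ByteString(d.digest()))

  digest.foreach { hash => println(hash); system.terminate() }
}
```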
Await.result(flattened.limit(8).runWith(Sink.seq), 3.seconds) should be(List("1", "2", "3", "4", "5", "6", "7")) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala index a084970e6d..6b16d2e6a9 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala @@ -18,7 +18,7 @@ class RecipeGlobalRateLimit extends RecipeSpec { "Global rate limiting recipe" must { - //#global-limiter-actor + // #global-limiter-actor object Limiter { case object WantToPass case object MayPass @@ -75,11 +75,11 @@ class RecipeGlobalRateLimit extends RecipeSpec { waitQueue.foreach(_ ! Status.Failure(new IllegalStateException("limiter stopped"))) } } - //#global-limiter-actor + // #global-limiter-actor "work" in { - //#global-limiter-flow + // #global-limiter-flow def limitGlobal[T](limiter: ActorRef, maxAllowedWait: FiniteDuration): Flow[T, T, NotUsed] = { import akka.pattern.ask import akka.util.Timeout @@ -87,11 +87,11 @@ class RecipeGlobalRateLimit extends RecipeSpec { import system.dispatcher implicit val triggerTimeout = Timeout(maxAllowedWait) val limiterTriggerFuture = limiter ? Limiter.WantToPass - limiterTriggerFuture.map((_) => element) + limiterTriggerFuture.map(_ => element) }) } - //#global-limiter-flow + // #global-limiter-flow // Use a large period and emulate the timer by hand instead val limiter = system.actorOf(Limiter.props(2, 100.days, 1), "limiter") diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala index d6b3f99e8b..14c0430adc 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala @@ -11,7 +11,7 @@ import akka.stream.testkit._ import scala.concurrent.duration._ object HoldOps { - //#hold-version-1 + // #hold-version-1 import akka.stream._ import akka.stream.stage._ final class HoldWithInitial[T](initial: T) extends GraphStage[FlowShape[T, T]] { @@ -23,16 +23,17 @@ object HoldOps { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var currentValue: T = initial - setHandlers(in, out, new InHandler with OutHandler { - override def onPush(): Unit = { - currentValue = grab(in) - pull(in) - } + setHandlers(in, out, + new InHandler with OutHandler { + override def onPush(): Unit = { + currentValue = grab(in) + pull(in) + } - override def onPull(): Unit = { - push(out, currentValue) - } - }) + override def onPull(): Unit = { + push(out, currentValue) + } + }) override def preStart(): Unit = { pull(in) @@ -40,9 +41,9 @@ object HoldOps { } } - //#hold-version-1 + // #hold-version-1 - //#hold-version-2 + // #hold-version-2 import akka.stream._ import akka.stream.stage._ final class HoldWithWait[T] extends GraphStage[FlowShape[T, T]] { @@ -78,7 +79,7 @@ object HoldOps { } } } - //#hold-version-2 + // #hold-version-2 } class RecipeHold extends RecipeSpec { diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala index ceadb019c8..d39b70ecd8 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala @@ -15,11 +15,11 @@ class RecipeKeepAlive extends RecipeSpec { "work" in 
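Context for the global rate-limiting recipe above: the actor-based limiter exists to share one budget across many materialized streams. For rate-limiting a single stream, the built-in `throttle` is sufficient; a sketch:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import scala.concurrent.duration._

object ThrottleSketch extends App {
  implicit val system: ActorSystem = ActorSystem("throttle-sketch")
  import system.dispatcher

  // Per-stream rate limiting; the actor-based limiter is for a *shared* budget.
  Source(1 to 10)
    .throttle(2, 1.second) // at most 2 elements per second
    .runForeach(println)
    .onComplete(_ => system.terminate())
}
```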
{ val keepaliveMessage = ByteString(11) - //#inject-keepalive + // #inject-keepalive import scala.concurrent.duration._ val injectKeepAlive: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString].keepAlive(1.second, () => keepaliveMessage) - //#inject-keepalive + // #inject-keepalive // No need to test, this is a built-in stage with proper tests } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala index 2e96bf7a02..334181caed 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala @@ -20,11 +20,11 @@ class RecipeLoggingElements extends RecipeSpec { val mySource = Source(List("1", "2", "3")) - //#println-debug + // #println-debug val loggedSource = mySource.map { elem => println(elem); elem } - //#println-debug + // #println-debug loggedSource.runWith(Sink.ignore) printProbe.expectMsgAllOf("1", "2", "3") @@ -33,22 +33,22 @@ class RecipeLoggingElements extends RecipeSpec { val mySource = Source(List("1", "2", "3")) def analyse(s: String) = s "use log()" in { - //#log-custom + // #log-custom // customise log levels mySource .log("before-map") .withAttributes(Attributes .logLevels(onElement = Logging.WarningLevel, onFinish = Logging.InfoLevel, onFailure = Logging.DebugLevel)) .map(analyse) - //#log-custom + // #log-custom } "use log() with custom adapter" in { - //#log-custom + // #log-custom // or provide custom logging adapter implicit val adapter: LoggingAdapter = Logging(system, "customLogger") mySource.log("custom") - //#log-custom + // #log-custom val loggedSource = mySource.log("custom") EventFilter.debug(start = "[custom] Element: ").intercept { @@ -57,12 +57,12 @@ class RecipeLoggingElements extends RecipeSpec { } "use log() for error logging" in { - //#log-error + // #log-error Source(-5 to 5) - .map(1 / _) //throwing ArithmeticException: / by zero + .map(1 / _) // throwing ArithmeticException: / by zero .log("error logging") .runWith(Sink.ignore) - //#log-error + // #log-error } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala index 0e24d268cc..220f30f0a3 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala @@ -22,16 +22,16 @@ class RecipeManualTrigger extends RecipeSpec { val triggerSource = Source.fromPublisher(pub) val sink = Sink.fromSubscriber(sub) - //#manually-triggered-stream + // #manually-triggered-stream val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ val zip = builder.add(Zip[Message, Trigger]()) - elements ~> zip.in0 + elements ~> zip.in0 triggerSource ~> zip.in1 - zip.out ~> Flow[(Message, Trigger)].map { case (msg, trigger) => msg } ~> sink + zip.out ~> Flow[(Message, Trigger)].map { case (msg, trigger) => msg } ~> sink ClosedShape }) - //#manually-triggered-stream + // #manually-triggered-stream graph.run() @@ -61,17 +61,17 @@ class RecipeManualTrigger extends RecipeSpec { val triggerSource = Source.fromPublisher(pub) val sink = Sink.fromSubscriber(sub) - //#manually-triggered-stream-zipwith + // #manually-triggered-stream-zipwith val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ val zip = builder.add(ZipWith((msg: 
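Related to the `HoldWithInitial`/`HoldWithWait` stages in the RecipeHold hunks above: newer Akka versions ship `extrapolate`, which covers the hold-with-initial case without a custom GraphStage. A sketch, assuming the `extrapolate(extrapolator, initial)` operator is available in the version at hand:

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Source }
import scala.concurrent.duration._

object HoldSketch extends App {
  implicit val system: ActorSystem = ActorSystem("hold-sketch")
  import system.dispatcher

  // Repeat the latest element whenever downstream out-paces upstream;
  // `initial` answers demand that arrives before the first element.
  val hold: Flow[Int, Int, NotUsed] =
    Flow[Int].extrapolate(last => Iterator.continually(last), initial = Some(0))

  Source(1 to 3)
    .throttle(1, 1.second)  // slow producer
    .via(hold)
    .throttle(10, 1.second) // faster consumer sees the latest value repeated
    .take(15)
    .runForeach(println)
    .onComplete(_ => system.terminate())
}
```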
Message, trigger: Trigger) => msg)) - elements ~> zip.in0 + elements ~> zip.in0 triggerSource ~> zip.in1 - zip.out ~> sink + zip.out ~> sink ClosedShape }) - //#manually-triggered-stream-zipwith + // #manually-triggered-stream-zipwith graph.run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala index 0761aa0dcc..b0178e522f 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala @@ -23,13 +23,13 @@ class RecipeMissedTicks extends RecipeSpec { val tickStream = Source.fromPublisher(pub) val sink = Sink.fromSubscriber(sub) - //#missed-ticks + // #missed-ticks val missedTicks: Flow[Tick, Int, NotUsed] = - Flow[Tick].conflateWithSeed(seed = (_) => 0)((missedTicks, tick) => missedTicks + 1) - //#missed-ticks + Flow[Tick].conflateWithSeed(seed = _ => 0)((missedTicks, tick) => missedTicks + 1) + // #missed-ticks val latch = TestLatch(3) val realMissedTicks: Flow[Tick, Int, NotUsed] = - Flow[Tick].conflateWithSeed(seed = (_) => 0)((missedTicks, tick) => { latch.countDown(); missedTicks + 1 }) + Flow[Tick].conflateWithSeed(seed = _ => 0)((missedTicks, tick) => { latch.countDown(); missedTicks + 1 }) tickStream.via(realMissedTicks).to(sink).run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala index a4c3c7c489..61e37158bd 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala @@ -25,7 +25,7 @@ class RecipeMultiGroupBy extends RecipeSpec { else List(Topic("1"), Topic("2")) } - //#multi-groupby + // #multi-groupby val topicMapper: (Message) => immutable.Seq[Topic] = extractTopics val messageAndTopic: Source[(Message, Topic), NotUsed] = elems.mapConcat { (msg: Message) => @@ -38,11 +38,11 @@ class RecipeMultiGroupBy extends RecipeSpec { val multiGroups = messageAndTopic.groupBy(2, _._2).map { case (msg, topic) => // do what needs to be done - //#multi-groupby + // #multi-groupby (msg, topic) - //#multi-groupby + // #multi-groupby } - //#multi-groupby + // #multi-groupby val result = multiGroups .grouped(10) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala index d1cd6a68d9..10012a00a1 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala @@ -24,12 +24,12 @@ class RecipeParseLines extends RecipeSpec { ByteString("\nHello Akka!\r\nHello Streams!"), ByteString("\r\n\r\n"))) - //#parse-lines + // #parse-lines import akka.stream.scaladsl.Framing val linesStream = rawData .via(Framing.delimiter(ByteString("\r\n"), maximumFrameLength = 100, allowTruncation = true)) .map(_.utf8String) - //#parse-lines + // #parse-lines Await.result(linesStream.limit(10).runWith(Sink.seq), 3.seconds) should be( List("Hello World\r!", "Hello Akka!", "Hello Streams!", "")) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala index ce9083aebe..b34d14d375 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala @@ 
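Next to the `Framing.delimiter` line-parsing recipe above, it may be worth remembering that akka-stream also ships `JsonFraming` for chunked JSON input. A sketch that re-frames byte chunks into complete top-level objects (illustrative payload):

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ JsonFraming, Sink, Source }
import akka.util.ByteString

object FramingSketch extends App {
  implicit val system: ActorSystem = ActorSystem("framing-sketch")
  import system.dispatcher

  // Emits one complete JSON object per element, regardless of chunk boundaries.
  Source(List(ByteString("""{"a":1}{"b""""), ByteString(""":2}""")))
    .via(JsonFraming.objectScanner(maximumObjectLength = 1024))
    .map(_.utf8String)
    .runWith(Sink.seq)
    .foreach { objs => println(objs); system.terminate() } // Vector({"a":1}, {"b":2})
}
```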
-19,17 +19,17 @@ class RecipeReduceByKey extends RecipeSpec { def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!")) - //#word-count + // #word-count val counts: Source[(String, Int), NotUsed] = words - // split the words into separate streams first + // split the words into separate streams first .groupBy(MaximumDistinctWords, identity) - //transform each element to pair with number of words in it + // transform each element to pair with number of words in it .map(_ -> 1) // add counting logic to the streams .reduce((l, r) => (l._1, l._2 + r._2)) // get a stream of word counts .mergeSubstreams - //#word-count + // #word-count Await.result(counts.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( Set(("hello", 2), ("world", 1), ("and", 1), ("universe", 1), ("akka", 1), ("rocks!", 1000))) @@ -39,7 +39,7 @@ class RecipeReduceByKey extends RecipeSpec { def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!")) - //#reduce-by-key-general + // #reduce-by-key-general def reduceByKey[In, K, Out](maximumGroupSize: Int, groupKey: (In) => K, map: (In) => Out)( reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = { @@ -53,7 +53,7 @@ class RecipeReduceByKey extends RecipeSpec { val wordCounts = words.via( reduceByKey(MaximumDistinctWords, groupKey = (word: String) => word, map = (word: String) => 1)( (left: Int, right: Int) => left + right)) - //#reduce-by-key-general + // #reduce-by-key-general Await.result(wordCounts.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( Set(("hello", 2), ("world", 1), ("and", 1), ("universe", 1), ("akka", 1), ("rocks!", 1000))) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala index 0b2c716a60..fccd347b61 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala @@ -13,16 +13,16 @@ class RecipeSeq extends RecipeSpec { "not be done unsafely" in { val mySource = Source(1 to 3).map(_.toString) - //#draining-to-seq-unsafe + // #draining-to-seq-unsafe // Dangerous: might produce a collection with 2 billion elements! val f: Future[Seq[String]] = mySource.runWith(Sink.seq) - //#draining-to-seq-unsafe + // #draining-to-seq-unsafe f.futureValue should ===(Seq("1", "2", "3")) } "be done safely" in { val mySource = Source(1 to 3).map(_.toString) - //#draining-to-seq-safe + // #draining-to-seq-safe val MAX_ALLOWED_SIZE = 100 // OK. Future will fail with a `StreamLimitReachedException` @@ -33,7 +33,7 @@ class RecipeSeq extends RecipeSpec { // OK. 
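The RecipeSeq hunks here contrast `limit` and `take` on a bounded test source; on a truly unbounded source the difference is starker. A sketch showing `limit` failing the materialized future while `take` completes it:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }
import scala.util.{ Failure, Success }

object DrainSketch extends App {
  implicit val system: ActorSystem = ActorSystem("drain-sketch")
  import system.dispatcher

  // limit fails once the cap is hit; take completes with the prefix.
  Source.repeat("x").limit(100).runWith(Sink.seq).onComplete {
    case Failure(e) => println(s"limit failed as expected: $e") // StreamLimitReachedException
    case Success(_) => println("unexpected success")
  }

  Source.repeat("x").take(100).runWith(Sink.seq).foreach { seq =>
    println(s"take completed with ${seq.size} elements")
    system.terminate()
  }
}
```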
Collect up until max-th elements only, then cancel upstream val ignoreOverflow: Future[Seq[String]] = mySource.take(MAX_ALLOWED_SIZE).runWith(Sink.seq) - //#draining-to-seq-safe + // #draining-to-seq-safe limited.futureValue should ===(Seq("1", "2", "3")) ignoreOverflow.futureValue should ===(Seq("1", "2", "3")) } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala index 3876a525fc..afa466d913 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala @@ -17,10 +17,10 @@ class RecipeSimpleDrop extends RecipeSpec { "work" in { - //#simple-drop + // #simple-drop val droppyStream: Flow[Message, Message, NotUsed] = Flow[Message].conflate((lastMessage, newMessage) => newMessage) - //#simple-drop + // #simple-drop val latch = TestLatch(2) val realDroppyStream = Flow[Message].conflate((lastMessage, newMessage) => { latch.countDown(); newMessage }) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala index 8f7179dd2e..aa3a978542 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala @@ -16,9 +16,9 @@ class RecipeSourceFromFunction extends RecipeSpec { "be a mapping of Source.repeat" in { def builderFunction(): String = UUID.randomUUID.toString - //#source-from-function + // #source-from-function val source = Source.repeat(NotUsed).map(_ => builderFunction()) - //#source-from-function + // #source-from-function val f = source.take(2).runWith(Sink.seq) f.futureValue.distinct.size should ===(2) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala index cf4a2b81d8..430e0d2397 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala @@ -19,47 +19,47 @@ class RecipeSplitter extends AnyWordSpec with BeforeAndAfterAll with Matchers wi "Splitter" should { " simple split " in { - //#Simple-Split - //Sample Source + // #Simple-Split + // Sample Source val source: Source[String, NotUsed] = Source(List("1-2-3", "2-3", "3-4")) val ret = source .map(s => s.split("-").toList) .mapConcat(identity) - //Sub-streams logic + // Sub-streams logic .map(s => s.toInt) .runWith(Sink.seq) - //Verify results + // Verify results ret.futureValue should be(Vector(1, 2, 3, 2, 3, 3, 4)) - //#Simple-Split + // #Simple-Split } " aggregate split" in { - //#Aggregate-Split - //Sample Source + // #Aggregate-Split + // Sample Source val source: Source[String, NotUsed] = Source(List("1-2-3", "2-3", "3-4")) val result = source .map(s => s.split("-").toList) - //split all messages into sub-streams + // split all messages into sub-streams .splitWhen(a => true) - //now split each collection + // now split each collection .mapConcat(identity) - //Sub-streams logic + // Sub-streams logic .map(s => s.toInt) - //aggregate each sub-stream + // aggregate each sub-stream .reduce((a, b) => a + b) - //and merge back the result into the original stream + // and merge back the result into the original stream .mergeSubstreams .runWith(Sink.seq); - //Verify results + // Verify results result.futureValue should be(Vector(6, 5, 7)) - 
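An equivalent formulation of the source-from-function recipe above, for comparison: `Source.fromIterator` with `Iterator.continually` evaluates the function once per downstream demand, just like the `Source.repeat(NotUsed).map(...)` trick. A sketch:

```scala
import java.util.UUID
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }

object FromFunctionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("from-function-sketch")
  import system.dispatcher

  // One fresh value per element of demand.
  val source = Source.fromIterator(() => Iterator.continually(UUID.randomUUID().toString))

  source.take(2).runWith(Sink.seq).foreach { ids => println(ids); system.terminate() }
}
```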
//#Aggregate-Split + // #Aggregate-Split } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala index 2db90e2896..a2aa4197e1 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala @@ -21,7 +21,7 @@ class RecipeWorkerPool extends RecipeSpec { val worker = Flow[String].map(_ + " done") - //#worker-pool + // #worker-pool def balancer[In, Out](worker: Flow[In, Out, Any], workerCount: Int): Flow[In, Out, NotUsed] = { import GraphDSL.Implicits._ @@ -40,7 +40,7 @@ class RecipeWorkerPool extends RecipeSpec { } val processedJobs: Source[Result, NotUsed] = myJobs.via(balancer(worker, 3)) - //#worker-pool + // #worker-pool Await.result(processedJobs.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( Set("1 done", "2 done", "3 done", "4 done", "5 done")) diff --git a/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala b/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala index eaed0b64e5..aa3034a54b 100644 --- a/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala @@ -27,45 +27,45 @@ class StreamFileDocSpec extends AkkaSpec(UnboundedMailboxConfig) { override def afterTermination() = Files.delete(file) { - //#file-source + // #file-source import akka.stream.scaladsl._ - //#file-source + // #file-source Thread.sleep(0) // needs a statement here for valid syntax and to avoid "unused" warnings } { - //#file-source + // #file-source val file = Paths.get("example.csv") - //#file-source + // #file-source } { - //#file-sink + // #file-sink val file = Paths.get("greeting.txt") - //#file-sink + // #file-sink } "read data from a file" in { - //#file-source - def handle(b: ByteString): Unit //#file-source + // #file-source + def handle(b: ByteString): Unit // #file-source = () - //#file-source + // #file-source val foreach: Future[IOResult] = FileIO.fromPath(file).to(Sink.ignore).run() - //#file-source + // #file-source } "configure dispatcher in code" in { - //#custom-dispatcher-code + // #custom-dispatcher-code FileIO.fromPath(file).withAttributes(ActorAttributes.dispatcher("custom-blocking-io-dispatcher")) - //#custom-dispatcher-code + // #custom-dispatcher-code } "write data into a file" in { - //#file-sink + // #file-sink val text = Source.single("Hello Akka Stream!") val result: Future[IOResult] = text.map(t => ByteString(t)).runWith(FileIO.toPath(file)) - //#file-sink + // #file-sink } } diff --git a/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala b/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala index 2e72f57858..b372369802 100644 --- a/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala @@ -25,7 +25,7 @@ class StreamTcpDocSpec extends AkkaSpec { "simple server connection" in { { - //#echo-server-simple-bind + // #echo-server-simple-bind val binding: Future[ServerBinding] = Tcp(system).bind("127.0.0.1", 8888).to(Sink.ignore).run() @@ -34,11 +34,11 @@ class StreamTcpDocSpec extends AkkaSpec { case _ => // ... 
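A note on the `balancer` worker-pool recipe above: when the workers are plain parallel computations rather than distinct flows needing Balance/Merge routing, `mapAsyncUnordered` yields the same parallelism with far less machinery. A sketch:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }
import scala.concurrent.Future

object PoolSketch extends App {
  implicit val system: ActorSystem = ActorSystem("pool-sketch")
  import system.dispatcher

  // Up to three jobs in flight at once; results may arrive out of order.
  Source(List("1", "2", "3", "4", "5"))
    .mapAsyncUnordered(parallelism = 3)(job => Future(job + " done"))
    .runWith(Sink.seq)
    .foreach { results => println(results.sorted); system.terminate() }
}
```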
} } - //#echo-server-simple-bind + // #echo-server-simple-bind } { val (host, port) = SocketUtil.temporaryServerHostnameAndPort() - //#echo-server-simple-handle + // #echo-server-simple-handle import akka.stream.scaladsl.Framing val connections: Source[IncomingConnection, Future[ServerBinding]] = @@ -54,7 +54,7 @@ class StreamTcpDocSpec extends AkkaSpec { connection.handleWith(echo) } - //#echo-server-simple-handle + // #echo-server-simple-handle } } @@ -66,7 +66,7 @@ class StreamTcpDocSpec extends AkkaSpec { import akka.stream.scaladsl.Framing val binding = - //#welcome-banner-chat-server + // #welcome-banner-chat-server connections .to(Sink.foreach { connection => // server logic, parses incoming commands @@ -79,11 +79,11 @@ class StreamTcpDocSpec extends AkkaSpec { val serverLogic = Flow[ByteString] .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true)) .map(_.utf8String) - //#welcome-banner-chat-server + // #welcome-banner-chat-server .map { command => serverProbe.ref ! command; command } - //#welcome-banner-chat-server + // #welcome-banner-chat-server .via(commandParser) // merge in the initial banner after parser .merge(welcome) @@ -93,7 +93,7 @@ class StreamTcpDocSpec extends AkkaSpec { connection.handleWith(serverLogic) }) .run() - //#welcome-banner-chat-server + // #welcome-banner-chat-server // make sure server is started before we connect binding.futureValue @@ -110,14 +110,14 @@ class StreamTcpDocSpec extends AkkaSpec { { // just for docs, never actually used - //#repl-client + // #repl-client val connection = Tcp(system).outgoingConnection("127.0.0.1", 8888) - //#repl-client + // #repl-client } { val connection = Tcp(system).outgoingConnection(localhost) - //#repl-client + // #repl-client val replParser = Flow[String].takeWhile(_ != "q").concat(Source.single("BYE")).map(elem => ByteString(s"$elem\n")) @@ -130,7 +130,7 @@ class StreamTcpDocSpec extends AkkaSpec { .via(replParser) val connected = connection.join(repl).run() - //#repl-client + // #repl-client // make sure we have a connection or fail already here connected.futureValue diff --git a/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala b/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala index de9251fe40..de65a3e84f 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala @@ -15,7 +15,7 @@ object BroadcastDocExample { implicit val system: ActorSystem = ActorSystem("BroadcastDocExample") - //#broadcast + // #broadcast import akka.NotUsed import akka.stream.ClosedShape import akka.stream.scaladsl.GraphDSL @@ -36,25 +36,25 @@ object BroadcastDocExample { implicit builder => (countS, minS, maxS) => import GraphDSL.Implicits._ val broadcast = builder.add(Broadcast[Int](3)) - source ~> broadcast + source ~> broadcast broadcast.out(0) ~> countS broadcast.out(1) ~> minS broadcast.out(2) ~> maxS ClosedShape }) .run() - //#broadcast + // #broadcast - //#broadcast-async + // #broadcast-async RunnableGraph.fromGraph(GraphDSL.createGraph(countSink.async, minSink.async, maxSink.async)(Tuple3.apply) { implicit builder => (countS, minS, maxS) => import GraphDSL.Implicits._ val broadcast = builder.add(Broadcast[Int](3)) - source ~> broadcast + source ~> broadcast broadcast.out(0) ~> countS broadcast.out(1) ~> minS broadcast.out(2) ~> maxS ClosedShape }) - //#broadcast-async + // #broadcast-async } diff --git 
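To round out the StreamTcpDocSpec hunks above: the REPL client is interactive, but a one-shot client against the echo server is even smaller. A sketch that assumes a server is already listening on the example's 127.0.0.1:8888:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source, Tcp }
import akka.util.ByteString

object TcpOneShotSketch extends App {
  implicit val system: ActorSystem = ActorSystem("tcp-sketch")
  import system.dispatcher

  // Send one line, read the first reply, then shut down.
  Source.single(ByteString("ping\n"))
    .via(Tcp(system).outgoingConnection("127.0.0.1", 8888))
    .runWith(Sink.head)
    .foreach { reply => println(reply.utf8String); system.terminate() }
}
```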
a/akka-docs/src/test/scala/docs/stream/operators/Map.scala b/akka-docs/src/test/scala/docs/stream/operators/Map.scala index 2f670d4653..3bf84ce759 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/Map.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/Map.scala @@ -12,8 +12,8 @@ import akka.stream.scaladsl._ object Map { - //#map + // #map val source: Source[Int, NotUsed] = Source(1 to 10) val mapped: Source[String, NotUsed] = source.map(elem => elem.toString) - //#map + // #map } diff --git a/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala b/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala index 6654d48e51..a98dd25514 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala @@ -24,10 +24,11 @@ object MergeSequenceDocExample { .fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ // Partitions stream into messages that should or should not be processed - val partition = builder.add(Partition[(Message, Long)](2, { - case (message, _) if shouldProcess(message) => 0 - case _ => 1 - })) + val partition = builder.add(Partition[(Message, Long)](2, + { + case (message, _) if shouldProcess(message) => 0 + case _ => 1 + })) // Merges stream by the index produced by zipWithIndex val merge = builder.add(MergeSequence[(Message, Long)](2)(_._2)) @@ -35,7 +36,7 @@ object MergeSequenceDocExample { // First goes through message processor partition.out(0) ~> messageProcessor ~> merge // Second partition bypasses message processor - partition.out(1) ~> merge + partition.out(1) ~> merge merge.out.map(_._1) ~> messageAcknowledger ClosedShape }) diff --git a/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala b/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala index 1f57bcf5f9..54db58f254 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala @@ -10,7 +10,7 @@ object PartitionDocExample { implicit val system: ActorSystem = ??? - //#partition + // #partition import akka.NotUsed import akka.stream.Attributes import akka.stream.Attributes.LogLevels @@ -33,12 +33,12 @@ object PartitionDocExample { .fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ val partition = builder.add(Partition[Int](2, element => if (element % 2 == 0) 0 else 1)) - source ~> partition.in + source ~> partition.in partition.out(0) ~> even partition.out(1) ~> odd ClosedShape }) .run() - //#partition + // #partition } diff --git a/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala b/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala index b5c3966222..ea6fbab627 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala @@ -12,7 +12,7 @@ object SourceOperators { implicit val system: ActorSystem = ??? 
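Supporting detail for the MergeSequence example above: the operator needs a monotonically increasing index per element, and `zipWithIndex` is the usual way to attach one before partitioning. A sketch:

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source

object IndexSketch extends App {
  implicit val system: ActorSystem = ActorSystem("index-sketch")
  import system.dispatcher

  // Pair each element with its position, ready for Partition + MergeSequence.
  val indexed: Source[(String, Long), NotUsed] =
    Source(List("a", "b", "c")).zipWithIndex

  indexed.runForeach(println).onComplete(_ => system.terminate()) // (a,0) (b,1) (c,2)
}
```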
def fromFuture(): Unit = { - //#sourceFromFuture + // #sourceFromFuture import akka.actor.ActorSystem import akka.stream.scaladsl._ @@ -23,12 +23,12 @@ object SourceOperators { val source: Source[Int, NotUsed] = Source.future(Future.successful(10)) val sink: Sink[Int, Future[Done]] = Sink.foreach((i: Int) => println(i)) - val done: Future[Done] = source.runWith(sink) //10 - //#sourceFromFuture + val done: Future[Done] = source.runWith(sink) // 10 + // #sourceFromFuture } def actorRef(): Unit = { - //#actorRef + // #actorRef import akka.Done import akka.actor.ActorRef import akka.stream.OverflowStrategy @@ -52,11 +52,11 @@ object SourceOperators { // The stream completes successfully with the following message actorRef ! Done - //#actorRef + // #actorRef } def actorRefWithBackpressure(): Unit = { - //#actorRefWithBackpressure + // #actorRefWithBackpressure import akka.actor.Status.Success import akka.actor.ActorRef @@ -82,11 +82,11 @@ object SourceOperators { // The stream completes successfully with the following message actorRef ! Success(()) - //#actorRefWithBackpressure + // #actorRefWithBackpressure } def maybe(): Unit = { - //#maybe + // #maybe import akka.stream.scaladsl._ import scala.concurrent.Promise @@ -98,6 +98,6 @@ object SourceOperators { // a new Promise is returned when the stream is materialized val promise2 = source.run() promise2.success(Some(2)) // prints 2 - //#maybe + // #maybe } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala b/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala index 727cfffd42..fe737ee7ce 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala @@ -28,7 +28,7 @@ class WithContextSpec extends AkkaSpec { .map(_._1) // keep the first tuple element as stream element val mapped: SourceWithContext[String, Int, NotUsed] = sourceWithContext - // regular operators apply to the element without seeing the context + // regular operators apply to the element without seeing the context .map(s => s.reverse) // running the source and asserting the outcome @@ -63,7 +63,7 @@ class WithContextSpec extends AkkaSpec { .map(_._1) // keep the first pair element as stream element val mapped = flowWithContext - // regular operators apply to the element without seeing the context + // regular operators apply to the element without seeing the context .map(_.reverse) // running the flow with some sample data and asserting the outcome diff --git a/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala b/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala index 5d00578d12..9f9d45ca0d 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala @@ -23,25 +23,25 @@ import scala.concurrent.Future class StreamConvertersToJava extends AkkaSpec with Futures { "demonstrate materialization to Java8 streams" in { - //#asJavaStream + // #asJavaStream val source: Source[Int, NotUsed] = Source(0 to 9).filter(_ % 2 == 0) val sink: Sink[Int, stream.Stream[Int]] = StreamConverters.asJavaStream[Int]() val jStream: java.util.stream.Stream[Int] = source.runWith(sink) - //#asJavaStream + // #asJavaStream jStream.count should be(5) } "demonstrate conversion from Java8 streams" in { - //#fromJavaStream + // #fromJavaStream def factory(): IntStream = 
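Alongside the `Source.actorRef` and `Source.maybe` examples above, `Source.queue` is the backpressure-aware sibling: `offer` returns a `Future` saying whether the element was accepted. A sketch, assuming the materialized `SourceQueueWithComplete` API:

```scala
import akka.actor.ActorSystem
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Sink, Source }

object QueueSketch extends App {
  implicit val system: ActorSystem = ActorSystem("queue-sketch")

  val queue = Source.queue[Int](bufferSize = 16, OverflowStrategy.backpressure)
    .to(Sink.foreach(println))
    .run()

  queue.offer(1) // Future[QueueOfferResult]
  queue.offer(2)
  queue.complete() // completes the stream once the buffer is drained
}
```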
IntStream.rangeClosed(0, 9) val source: Source[Int, NotUsed] = StreamConverters.fromJavaStream(() => factory()).map(_.intValue()) val sink: Sink[Int, Future[immutable.Seq[Int]]] = Sink.seq[Int] val futureInts: Future[immutable.Seq[Int]] = source.toMat(sink)(Keep.right).run() - //#fromJavaStream + // #fromJavaStream whenReady(futureInts) { ints => ints should be((0 to 9).toSeq) } diff --git a/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala b/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala index d9209eb0fb..361920294b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala @@ -23,7 +23,7 @@ class ToFromJavaIOStreams extends AkkaSpec with Futures { "demonstrate conversion from java.io.streams" in { - //#tofromJavaIOStream + // #tofromJavaIOStream val bytes = "Some random input".getBytes val inputStream = new ByteArrayInputStream(bytes) val outputStream = new ByteArrayOutputStream() @@ -36,7 +36,7 @@ class ToFromJavaIOStreams extends AkkaSpec with Futures { val eventualResult = source.via(toUpperCase).runWith(sink) - //#tofromJavaIOStream + // #tofromJavaIOStream whenReady(eventualResult) { _ => outputStream.toByteArray.map(_.toChar).mkString should be("SOME RANDOM INPUT") } @@ -44,26 +44,26 @@ class ToFromJavaIOStreams extends AkkaSpec with Futures { } "demonstrate usage as java.io.InputStream" in { - //#asJavaInputStream + // #asJavaInputStream val toUpperCase: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString].map(_.map(_.toChar.toUpper.toByte)) val source: Source[ByteString, NotUsed] = Source.single(ByteString("some random input")) val sink: Sink[ByteString, InputStream] = StreamConverters.asInputStream() val inputStream: InputStream = source.via(toUpperCase).runWith(sink) - //#asJavaInputStream + // #asJavaInputStream inputStream.read() should be('S') inputStream.close() } "demonstrate usage as java.io.OutputStream" in { - //#asJavaOutputStream + // #asJavaOutputStream val source: Source[ByteString, OutputStream] = StreamConverters.asOutputStream() val sink: Sink[ByteString, Future[ByteString]] = Sink.fold[ByteString, ByteString](ByteString.empty)(_ ++ _) val (outputStream, result): (OutputStream, Future[ByteString]) = source.toMat(sink)(Keep.both).run() - //#asJavaOutputStream + // #asJavaOutputStream val bytesArray = Array.fill[Byte](3)(Random.nextInt(1024).asInstanceOf[Byte]) outputStream.write(bytesArray) outputStream.close() diff --git a/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala b/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala index 2ca3cb050f..bee0f5d1e1 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala @@ -11,20 +11,20 @@ object StatefulMap { implicit val actorSystem: ActorSystem = ??? 
def indexed(): Unit = { - //#zipWithIndex + // #zipWithIndex Source(List("A", "B", "C", "D")) .statefulMap(() => 0L)((index, elem) => (index + 1, (elem, index)), _ => None) .runForeach(println) - //prints - //(A,0) - //(B,1) - //(C,2) - //(D,3) - //#zipWithIndex + // prints + // (A,0) + // (B,1) + // (C,2) + // (D,3) + // #zipWithIndex } def bufferUntilChanged(): Unit = { - //#bufferUntilChanged + // #bufferUntilChanged Source("A" :: "B" :: "B" :: "C" :: "C" :: "C" :: "D" :: Nil) .statefulMap(() => List.empty[String])( (buffer, element) => @@ -35,16 +35,16 @@ object StatefulMap { buffer => Some(buffer)) .filter(_.nonEmpty) .runForeach(println) - //prints - //List(A) - //List(B, B) - //List(C, C, C) - //List(D) - //#bufferUntilChanged + // prints + // List(A) + // List(B, B) + // List(C, C, C) + // List(D) + // #bufferUntilChanged } def distinctUntilChanged(): Unit = { - //#distinctUntilChanged + // #distinctUntilChanged Source("A" :: "B" :: "B" :: "C" :: "C" :: "C" :: "D" :: Nil) .statefulMap(() => Option.empty[String])( (lastElement, elem) => @@ -55,20 +55,20 @@ object StatefulMap { _ => None) .collect { case Some(elem) => elem } .runForeach(println) - //prints - //A - //B - //C - //D - //#distinctUntilChanged + // prints + // A + // B + // C + // D + // #distinctUntilChanged } def statefulMapConcatLike(): Unit = { - //#statefulMapConcatLike + // #statefulMapConcatLike Source(1 to 10) .statefulMap(() => List.empty[Int])( (state, elem) => { - //grouped 3 elements into a list + // group every 3 elements into a list val newState = elem :: state if (newState.size == 3) (Nil, newState.reverse) @@ -78,17 +78,17 @@ object StatefulMap { state => Some(state.reverse)) .mapConcat(identity) .runForeach(println) - //prints - //1 - //2 - //3 - //4 - //5 - //6 - //7 - //8 - //9 - //10 - //#statefulMapConcatLike + // prints + // 1 + // 2 + // 3 + // 4 + // 5 + // 6 + // 7 + // 8 + // 9 + // 10 + // #statefulMapConcatLike } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala index 1ab908531a..7041c4ca76 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala @@ -13,15 +13,15 @@ object AsPublisher { implicit val ec: ExecutionContextExecutor = system.dispatcher def asPublisherExample() = { - //#asPublisher + // #asPublisher val source = Source(1 to 5) val publisher = source.runWith(Sink.asPublisher(false)) Source.fromPublisher(publisher).runWith(Sink.foreach(println)) // 1 2 3 4 5 Source .fromPublisher(publisher) - .runWith(Sink.foreach(println)) //No output, because the source was not able to subscribe to the publisher. - //#asPublisher + .runWith(Sink.foreach(println)) // No output, because the source was not able to subscribe to the publisher. + // #asPublisher } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala index 7d07489f7f..0614c832f1 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala @@ -14,9 +14,9 @@ object Cancelled { implicit val system: ActorSystem = ???
implicit val ec: ExecutionContextExecutor = system.dispatcher def cancelledExample(): NotUsed = { - //#cancelled + // #cancelled val source = Source(1 to 5) source.runWith(Sink.cancelled) - //#cancelled + // #cancelled } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala index 383f2943c8..129ccf4057 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala @@ -13,11 +13,11 @@ object Collection { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def collectionExample(): Unit = { - //#collection + // #collection val source = Source(1 to 5) val result: Future[List[Int]] = source.runWith(Sink.collection[Int, List[Int]]) result.foreach(println) - //List(1, 2, 3, 4, 5) - //#collection + // List(1, 2, 3, 4, 5) + // #collection } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala index a78b684c39..35cb62fbf3 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala @@ -13,11 +13,11 @@ object Fold { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def foldExample: Future[Unit] = { - //#fold + // #fold val source = Source(1 to 100) val result: Future[Int] = source.runWith(Sink.fold(0)((acc, element) => acc + element)) result.map(println) - //5050 - //#fold + // 5050 + // #fold } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala index 6f2b829c66..6ec188e9db 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala @@ -13,11 +13,11 @@ object HeadOption { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def headOptionExample(): Unit = { - //#headoption + // #headoption val source = Source.empty val result: Future[Option[Int]] = source.runWith(Sink.headOption) result.foreach(println) - //None - //#headoption + // None + // #headoption } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala index 036fe71d04..40f94f5d1e 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala @@ -17,12 +17,12 @@ object Ignore { implicit val system: ActorSystem = ??? 
def ignoreExample(): Unit = { - //#ignore + // #ignore val lines: Source[String, NotUsed] = readLinesFromFile() val databaseIds: Source[UUID, NotUsed] = lines.mapAsync(1)(line => saveLineToDatabase(line)) databaseIds.mapAsync(1)(uuid => writeIdToFile(uuid)).runWith(Sink.ignore) - //#ignore + // #ignore } private def readLinesFromFile(): Source[String, NotUsed] = diff --git a/akka-docs/src/test/scala/docs/stream/operators/source/From.scala b/akka-docs/src/test/scala/docs/stream/operators/source/From.scala index 68b0c61e00..5861499933 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/source/From.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/source/From.scala @@ -14,23 +14,23 @@ object From { implicit val system: ActorSystem = null def fromIteratorSample(): Unit = { - //#from-iterator + // #from-iterator Source.fromIterator(() => (1 to 3).iterator).runForeach(println) // could print // 1 // 2 // 3 - //#from-iterator + // #from-iterator } def fromJavaStreamSample(): Unit = { - //#from-javaStream + // #from-javaStream Source.fromJavaStream(() => IntStream.rangeClosed(1, 3)).runForeach(println) // could print // 1 // 2 // 3 - //#from-javaStream + // #from-javaStream } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala b/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala index 8781c7ae42..7a1efddbd0 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala @@ -25,7 +25,7 @@ object Restart extends App { case class CantConnectToDatabase(msg: String) extends RuntimeException(msg) with NoStackTrace def onRestartWithBackoffInnerFailure(): Unit = { - //#restart-failure-inner-failure + // #restart-failure-inner-failure // could throw if for example it used a database connection to get rows val flakySource: Source[() => Int, NotUsed] = Source(List(() => 1, () => 2, () => 3, () => throw CantConnectToDatabase("darn"))) @@ -34,27 +34,27 @@ object Restart extends App { RestartSettings(minBackoff = 1.second, maxBackoff = 10.seconds, randomFactor = 0.1))(() => flakySource) forever.runWith(Sink.foreach(nr => system.log.info("{}", nr()))) // logs - //[INFO] [12/10/2019 13:51:58.300] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 1 - //[INFO] [12/10/2019 13:51:58.301] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 2 - //[INFO] [12/10/2019 13:51:58.302] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 3 - //[WARN] [12/10/2019 13:51:58.310] [default-akka.test.stream-dispatcher-7] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // [INFO] [12/10/2019 13:51:58.300] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:58.301] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:58.302] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:51:58.310] [default-akka.test.stream-dispatcher-7] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) // --> 1 second gap - //[INFO] [12/10/2019 13:51:59.379] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 - //[INFO] [12/10/2019 13:51:59.382] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 - //[INFO] [12/10/2019 13:51:59.383] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 - //[WARN] [12/10/2019 13:51:59.386] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) - //--> 2 second gap - //[INFO] [12/10/2019 13:52:01.594] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 - //[INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 - //[INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 - //[WARN] [12/10/2019 13:52:01.596] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) - //#restart-failure-inner-failure + // [INFO] [12/10/2019 13:51:59.379] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:59.382] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:59.383] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:51:59.386] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // --> 2 second gap + // [INFO] [12/10/2019 13:52:01.594] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:52:01.596] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // #restart-failure-inner-failure } def onRestartWithBackoffInnerComplete(): Unit = { - //#restart-failure-inner-complete + // #restart-failure-inner-complete val finiteSource = Source.tick(1.second, 1.second, "tick").take(3) val forever = RestartSource.onFailuresWithBackoff(RestartSettings(1.second, 10.seconds, 0.1))(() => finiteSource) forever.runWith(Sink.foreach(println)) @@ -62,11 +62,11 @@ object Restart extends App { // tick // tick // tick - //#restart-failure-inner-complete + // #restart-failure-inner-complete } def onRestartWitFailureKillSwitch(): Unit = { - //#restart-failure-inner-complete-kill-switch + // #restart-failure-inner-complete-kill-switch val flakySource: Source[() => Int, NotUsed] = Source(List(() => 1, () => 2, () => 3, () => throw CantConnectToDatabase("darn"))) val stopRestarting: UniqueKillSwitch = @@ -75,9 +75,9 @@ object Restart extends App { .viaMat(KillSwitches.single)(Keep.right) .toMat(Sink.foreach(nr => println(s"Nr ${nr()}")))(Keep.left) .run() - //... 
from some where else + // ... from somewhere else // stop the source from restarting stopRestarting.shutdown() - //#restart-failure-inner-complete-kill-switch + // #restart-failure-inner-complete-kill-switch } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala index 3557fcf9a6..f862d2e1e8 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala @@ -8,25 +8,25 @@ import akka.NotUsed import akka.stream.scaladsl.Flow object Collect { - //#collect-elements + // #collect-elements trait Message final case class Ping(id: Int) extends Message final case class Pong(id: Int) - //#collect-elements + // #collect-elements def collectExample(): Unit = { - //#collect + // #collect val flow: Flow[Message, Pong, NotUsed] = Flow[Message].collect { case Ping(id) if id != 0 => Pong(id) } - //#collect + // #collect } def collectType(): Unit = { - //#collectType + // #collectType val flow: Flow[Message, Pong, NotUsed] = Flow[Message].collectType[Ping].filter(_.id != 0).map(p => Pong(p.id)) - //#collectType + // #collectType } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala index 724cea9507..39d4ca1839 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala @@ -15,9 +15,9 @@ object CompletionTimeout { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def completionTimeoutExample: Future[Done] = { - //#completionTimeout + // #completionTimeout val source = Source(1 to 10000).map(number => number * number) source.completionTimeout(10.milliseconds).run() - //#completionTimeout + // #completionTimeout } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala index 744750a30b..5d6e792e60 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala @@ -13,7 +13,7 @@ import akka.stream.scaladsl.Source object Conflate { def conflateExample(): Unit = { - //#conflate + // #conflate import scala.concurrent.duration._ Source @@ -21,11 +21,11 @@ object Conflate { .throttle(10, per = 1.second) // faster upstream .conflate((acc, el) => acc + el) // acc: Int, el: Int .throttle(1, per = 1.second) // slow downstream - //#conflate + // #conflate } def conflateWithSeedExample(): Unit = { - //#conflateWithSeed + // #conflateWithSeed import scala.concurrent.duration._ case class Summed(i: Int) { @@ -37,7 +37,7 @@ object Conflate { .throttle(10, per = 1.second) // faster upstream .conflateWithSeed(el => Summed(el))((acc, el) => acc.sum(Summed(el))) // (Summed, Int) => Summed .throttle(1, per = 1.second) // slow downstream - //#conflateWithSeed + // #conflateWithSeed } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala index 5b73754717..3801801f8c 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala +++ 
b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala @@ -22,7 +22,6 @@ import scala.concurrent.duration._ import scala.util.Random /** - * */ object ExtrapolateAndExpandMain extends App { implicit val sys: ActorSystem = ActorSystem("25fps-stream") @@ -43,9 +42,9 @@ object ExtrapolateAndExpand { // if upstream is too slow, produce copies of the last frame but grayed out. val rateControl: Flow[Frame, Frame, NotUsed] = Flow[Frame].extrapolate((frame: Frame) => { - val grayedOut = frame.withFilter(Gray) - Iterator.continually(grayedOut) - }, Some(Frame.blackFrame)) + val grayedOut = frame.withFilter(Gray) + Iterator.continually(grayedOut) + }, Some(Frame.blackFrame)) val videoSource: Source[Frame, NotUsed] = networkSource.via(decode).via(rateControl) @@ -64,7 +63,7 @@ object ExtrapolateAndExpand { Flow[Frame].expand((frame: Frame) => { val watermarked = frame.withFilter(Watermark) val grayedOut = frame.withFilter(Gray) - (Iterator.single(watermarked) ++ Iterator.continually(grayedOut)) + Iterator.single(watermarked) ++ Iterator.continually(grayedOut) }) val watermarkedVideoSource: Source[Frame, NotUsed] = diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala index c8a63e14b1..566c2578a3 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala @@ -11,17 +11,17 @@ import akka.stream.scaladsl.Source //#imports object Fold extends App { - //#histogram + // #histogram case class Histogram(low: Long = 0, high: Long = 0) { def add(i: Int): Histogram = if (i < 100) copy(low = low + 1) else copy(high = high + 1) } - //#histogram + // #histogram implicit val sys: ActorSystem = ActorSystem() - //#fold + // #fold Source(1 to 150).fold(Histogram())((acc, n) => acc.add(n)).runForeach(println) // Prints: Histogram(99,51) - //#fold + // #fold } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala index 27fa1d9990..4001f0620a 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala @@ -16,14 +16,15 @@ object FoldAsync extends App { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#foldAsync + // #foldAsync case class Histogram(low: Long = 0, high: Long = 0) { def add(i: Int): Future[Histogram] = - if (i < 100) Future { copy(low = low + 1) } else Future { copy(high = high + 1) } + if (i < 100) Future { copy(low = low + 1) } + else Future { copy(high = high + 1) } } Source(1 to 150).foldAsync(Histogram())((acc, n) => acc.add(n)).runForeach(println) // Prints: Histogram(99,51) - //#foldAsync + // #foldAsync } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala index 80c80dda7a..5888d1d564 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala @@ -11,15 +11,15 @@ object GroupBy { def groupBySourceExample(): Unit = { implicit val system: ActorSystem = ??? 
- //#groupBy + // #groupBy Source(1 to 10) .groupBy(maxSubstreams = 2, _ % 2 == 0) // create two sub-streams with odd and even numbers .reduce(_ + _) // for each sub-stream, sum its elements .mergeSubstreams // merge back into a stream .runForeach(println) - //25 - //30 - //#groupBy + // 25 + // 30 + // #groupBy } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala index bd03a1bc19..5351434ccb 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala @@ -11,7 +11,7 @@ object Grouped { implicit val system: ActorSystem = ActorSystem() - //#grouped + // #grouped Source(1 to 7).grouped(3).runForeach(println) // Vector(1, 2, 3) // Vector(4, 5, 6) @@ -21,7 +21,7 @@ object Grouped { // 6 (= 1 + 2 + 3) // 15 (= 4 + 5 + 6) // 7 (= 7) - //#grouped + // #grouped } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala index b7ec48a57a..42d32d919a 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala @@ -12,7 +12,7 @@ object GroupedWeighted { implicit val system: ActorSystem = ActorSystem() - //#groupedWeighted + // #groupedWeighted val collections = immutable.Iterable(Seq(1, 2), Seq(3, 4), Seq(5, 6)) Source[Seq[Int]](collections).groupedWeighted(4)(_.length).runForeach(println) // Vector(Seq(1, 2), Seq(3, 4)) @@ -21,7 +21,7 @@ object GroupedWeighted { Source[Seq[Int]](collections).groupedWeighted(3)(_.length).runForeach(println) // Vector(Seq(1, 2), Seq(3, 4)) // Vector(Seq(5, 6)) - //#groupedWeighted + // #groupedWeighted } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala index 67d6abd875..bc47ff86ea 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala @@ -12,11 +12,11 @@ object Intersperse extends App { implicit val system: ActorSystem = ActorSystem() - //#intersperse + // #intersperse Source(1 to 4).map(_.toString).intersperse("[", ", ", "]").runWith(Sink.foreach(print)) // prints // [1, 2, 3, 4] - //#intersperse + // #intersperse system.terminate() } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala index c304608bbd..d07ec8ae7d 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala @@ -13,13 +13,13 @@ import akka.stream.Attributes object Log { def logExample(): Unit = { Flow[String] - //#log + // #log .log(name = "myStream") .addAttributes( Attributes.logLevels( onElement = Attributes.LogLevels.Off, onFinish = Attributes.LogLevels.Info, onFailure = Attributes.LogLevels.Error)) - //#log + // #log } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala index 791aaf36c2..b4e981b54b 100644 --- 
a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala @@ -14,13 +14,13 @@ import akka.stream.Attributes object LogWithMarker { def logWithMarkerExample(): Unit = { Flow[String] - //#logWithMarker + // #logWithMarker .logWithMarker(name = "myStream", e => LogMarker(name = "myMarker", properties = Map("element" -> e))) .addAttributes( Attributes.logLevels( onElement = Attributes.LogLevels.Off, onFinish = Attributes.LogLevels.Info, onFailure = Attributes.LogLevels.Error)) - //#logWithMarker + // #logWithMarker } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala index 84789769f7..9bcfd42883 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala @@ -16,7 +16,6 @@ import scala.concurrent.duration._ import scala.util.Random /** - * */ object CommonMapAsync { case class Event(sequenceNumber: Int) @@ -29,7 +28,7 @@ object CommonMapAsync { // #mapasync-concurrent // #mapasyncunordered - val events: Source[Event, NotUsed] = //... + val events: Source[Event, NotUsed] = // ... // #mapasync-strict-order // #mapasync-concurrent // #mapasyncunordered @@ -43,7 +42,7 @@ object CommonMapAsync { def eventHandler(event: Event): Future[Int] = { println(s"Processing event $event...") - //... + // ... // #mapasync-strict-order // #mapasync-concurrent // #mapasyncunordered diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala index be23b2e66f..eecd24e3f7 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala @@ -16,7 +16,7 @@ object MapConcat { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#map-concat + // #map-concat def duplicate(i: Int): List[Int] = List(i, i) Source(1 to 3).mapConcat(i => duplicate(i)).runForeach(println) @@ -27,7 +27,7 @@ object MapConcat { // 2 // 3 // 3 - //#map-concat + // #map-concat } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala index b260af35d3..a3058e3cd2 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala @@ -16,7 +16,7 @@ object MapError extends App { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#map-error + // #map-error Source(-1 to 1) .map(1 / _) .mapError { @@ -30,6 +30,6 @@ object MapError extends App { } // prints "Divide by Zero Operation is not supported." 
- //#map-error + // #map-error } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala index 509ccdbccd..4cc4545768 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala @@ -9,7 +9,7 @@ import akka.stream.scaladsl.Source object MergeLatest extends App { implicit val system: ActorSystem = ActorSystem() - //#mergeLatest + // #mergeLatest val prices = Source(List(100, 101, 99, 103)) val quantity = Source(List(1, 3, 4, 2)) @@ -28,5 +28,5 @@ object MergeLatest extends App { // 396 // 412 // 206 - //#mergeLatest + // #mergeLatest } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala index 93d31949ea..f478e23ec0 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala @@ -13,11 +13,11 @@ object Reduce { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def reduceExample: Future[Unit] = { - //#reduceExample + // #reduceExample val source = Source(1 to 100).reduce((acc, element) => acc + element) val result: Future[Int] = source.runWith(Sink.head) result.map(println) - //5050 - //#reduceExample + // 5050 + // #reduceExample } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala index 6163e0df32..eac882ab5c 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala @@ -11,7 +11,7 @@ object Scan { implicit val system: ActorSystem = ActorSystem() - //#scan + // #scan val source = Source(1 to 5) source.scan(0)((acc, x) => acc + x).runForeach(println) // 0 (= 0) @@ -20,7 +20,7 @@ object Scan { // 6 (= 0 + 1 + 2 + 3) // 10 (= 0 + 1 + 2 + 3 + 4) // 15 (= 0 + 1 + 2 + 3 + 4 + 5) - //#scan + // #scan } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala index 5667b4d950..2be552e1c2 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala @@ -17,7 +17,7 @@ object ScanAsync { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#scan-async + // #scan-async def asyncFunction(acc: Int, next: Int): Future[Int] = Future { acc + next } @@ -30,7 +30,7 @@ object ScanAsync { // 6 (= 0 + 1 + 2 + 3) // 10 (= 0 + 1 + 2 + 3 + 4) // 15 (= 0 + 1 + 2 + 3 + 4 + 5) - //#scan-async + // #scan-async } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala index 0b451d2373..f4cec91f2b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala @@ -11,28 +11,28 @@ object Sliding { implicit val system: ActorSystem = ??? 
def slidingExample1(): Unit = { - //#sliding-1 + // #sliding-1 val source = Source(1 to 4) source.sliding(2).runForeach(println) // prints: // Vector(1, 2) // Vector(2, 3) // Vector(3, 4) - //#sliding-1 + // #sliding-1 } def slidingExample2(): Unit = { - //#sliding-2 + // #sliding-2 val source = Source(1 to 4) source.sliding(n = 3, step = 2).runForeach(println) // prints: // Vector(1, 2, 3) // Vector(3, 4) - shorter because stream ended before we got 3 elements - //#sliding-2 + // #sliding-2 } def slidingExample3(): Unit = { - //#moving-average + // #moving-average val numbers = Source(1 :: 3 :: 10 :: 2 :: 3 :: 4 :: 2 :: 10 :: 11 :: Nil) val movingAverage = numbers.sliding(5).map(window => window.sum.toFloat / window.size) movingAverage.runForeach(println) @@ -42,7 +42,7 @@ object Sliding { // 4.2 = average of 10, 2, 3, 4, 2 // 4.2 = average of 2, 3, 4, 2, 10 // 6.0 = average of 3, 4, 2, 10, 11 - //#moving-average + // #moving-average } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala index 25d828e00a..f46b48fcca 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala @@ -19,7 +19,7 @@ object Split { implicit val system: ActorSystem = ActorSystem() - //#splitWhen + // #splitWhen Source(1 to 100) .throttle(1, 100.millis) .map(elem => (elem, Instant.now())) @@ -54,7 +54,7 @@ object Split { // 10 // 10 // 7 - //#splitWhen + // #splitWhen } def splitAfterExample(args: Array[String]): Unit = { @@ -62,7 +62,7 @@ object Split { implicit val system: ActorSystem = ActorSystem() - //#splitAfter + // #splitAfter Source(1 to 100) .throttle(1, 100.millis) .map(elem => (elem, Instant.now())) @@ -95,7 +95,7 @@ object Split { // 6 // note that the very last element is never included due to sliding, // but that would not be a problem for an infinite stream - //#splitAfter + // #splitAfter } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala index 5efb3586ac..ef74cfaff9 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala @@ -13,7 +13,6 @@ import akka.stream.scaladsl.Source import scala.concurrent.duration._ /** - * */ object Throttle extends App { diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala index cdaa049c9c..0c7b31f9ff 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala @@ -14,13 +14,13 @@ object Watch { def someActor(): ActorRef = ???
def watchExample(): Unit = { - //#watch + // #watch val ref: ActorRef = someActor() val flow: Flow[String, String, NotUsed] = Flow[String].watch(ref).recover { case _: WatchedActorTerminatedException => s"$ref terminated" } - //#watch + // #watch } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala index cdeaaf4300..aee36119ce 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala @@ -16,16 +16,15 @@ object WatchTermination { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContext = ??? - //#watchTermination + // #watchTermination Source(1 to 5) - .watchTermination()( - (prevMatValue, future) => - // this function will be run when the stream terminates - // the Future provided as a second parameter indicates whether the stream completed successfully or failed - future.onComplete { - case Failure(exception) => println(exception.getMessage) - case Success(_) => println(s"The stream materialized $prevMatValue") - }) + .watchTermination()((prevMatValue, future) => + // this function will be run when the stream terminates + // the Future provided as a second parameter indicates whether the stream completed successfully or failed + future.onComplete { + case Failure(exception) => println(exception.getMessage) + case Success(_) => println(s"The stream materialized $prevMatValue") + }) .runForeach(println) /* Prints: @@ -50,6 +49,6 @@ object WatchTermination { 2 Boom */ - //#watchTermination + // #watchTermination } } diff --git a/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala b/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala index f7479514ca..767a4eba53 100644 --- a/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala @@ -105,23 +105,23 @@ class ParentChildSpec extends AnyWordSpec with Matchers with TestKitBase with Be "A GenericDependentParent" should { "be tested with a child probe" in { val probe = TestProbe() - //#child-maker-test + // #child-maker-test val maker = (_: ActorRefFactory) => probe.ref val parent = system.actorOf(Props(new GenericDependentParent(maker))) - //#child-maker-test + // #child-maker-test probe.send(parent, "pingit") probe.expectMsg("ping") } "demonstrate production version of child creator" in { - //#child-maker-prod + // #child-maker-prod val maker = (f: ActorRefFactory) => f.actorOf(Props(new Child)) val parent = system.actorOf(Props(new GenericDependentParent(maker))) - //#child-maker-prod + // #child-maker-prod } } - //#test-TestProbe-parent + // #test-TestProbe-parent "A TestProbe serving as parent" should { "test its child responses" in { val parent = TestProbe() @@ -130,9 +130,9 @@ class ParentChildSpec extends AnyWordSpec with Matchers with TestKitBase with Be parent.expectMsg("pong") } } - //#test-TestProbe-parent + // #test-TestProbe-parent - //#test-fabricated-parent + // #test-fabricated-parent "A fabricated parent" should { "test its child responses" in { val proxy = TestProbe() @@ -148,5 +148,5 @@ class ParentChildSpec extends AnyWordSpec with Matchers with TestKitBase with Be proxy.expectMsg("pong") } } - //#test-fabricated-parent + // #test-fabricated-parent } diff --git a/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala 
b/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala index b7679559f6..7ae20465df 100644 --- a/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala @@ -18,7 +18,7 @@ class MySpec() with AnyWordSpecLike with Matchers with BeforeAndAfterAll { - //#implicit-sender + // #implicit-sender override def afterAll(): Unit = { TestKit.shutdownActorSystem(system) diff --git a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala index 69f4baa6d7..41442e20c6 100644 --- a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala @@ -40,7 +40,7 @@ object TestKitDocSpec { } } - //#my-double-echo + // #my-double-echo class MyDoubleEcho extends Actor { var dest1: ActorRef = _ var dest2: ActorRef = _ @@ -54,9 +54,9 @@ object TestKitDocSpec { } } - //#my-double-echo + // #my-double-echo - //#test-probe-forward-actors + // #test-probe-forward-actors class Source(target: ActorRef) extends Actor { def receive = { case "start" => target ! "work" @@ -69,9 +69,9 @@ object TestKitDocSpec { } } - //#test-probe-forward-actors + // #test-probe-forward-actors - //#timer + // #timer case class TriggerScheduling(foo: String) object SchedKey @@ -85,10 +85,10 @@ object TestKitDocSpec { def triggerScheduling(msg: ScheduledMessage) = timers.startSingleTimer(SchedKey, msg, 500.millis) } - //#timer + // #timer class LoggingActor extends Actor { - //#logging-receive + // #logging-receive import akka.event.LoggingReceive def receive = LoggingReceive { case msg => // Do something ... @@ -96,7 +96,7 @@ object TestKitDocSpec { def otherState: Receive = LoggingReceive.withLabel("other") { case msg => // Do something else ... 
} - //#logging-receive + // #logging-receive } } @@ -104,12 +104,12 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { import TestKitDocSpec._ "demonstrate usage of TestActorRef" in { - //#test-actor-ref + // #test-actor-ref import akka.testkit.TestActorRef val actorRef = TestActorRef[MyActor] val actor = actorRef.underlyingActor - //#test-actor-ref + // #test-actor-ref } "demonstrate built-in expect methods" in { @@ -119,25 +119,25 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { testActor.tell("hello", ActorRef.noSender) testActor.tell("world", ActorRef.noSender) testActor.tell(42, ActorRef.noSender) - //#test-expect + // #test-expect val hello: String = expectMsg("hello") val any: String = expectMsgAnyOf("hello", "world") val all: immutable.Seq[String] = expectMsgAllOf("hello", "world") val i: Int = expectMsgType[Int] expectNoMessage(200.millis) - //#test-expect + // #test-expect testActor.tell("receveN-1", ActorRef.noSender) testActor.tell("receveN-2", ActorRef.noSender) - //#test-expect + // #test-expect val two: immutable.Seq[AnyRef] = receiveN(2) - //#test-expect + // #test-expect assert("hello" == hello) assert("hello" == any) assert(42 == i) } "demonstrate usage of TestFSMRef" in { - //#test-fsm-ref + // #test-fsm-ref import akka.testkit.TestFSMRef import scala.concurrent.duration._ @@ -159,12 +159,12 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { assert(fsm.isTimerActive("test") == true) fsm.cancelTimer("test") assert(fsm.isTimerActive("test") == false) - //#test-fsm-ref + // #test-fsm-ref } "demonstrate testing of behavior" in { - //#test-behavior + // #test-behavior import akka.testkit.TestActorRef import akka.pattern.ask @@ -172,21 +172,21 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { // hypothetical message stimulating a '42' answer val future: Future[Any] = actorRef ? 
Say42 future.futureValue should be(42) - //#test-behavior + // #test-behavior } "demonstrate unhandled message" in { - //#test-unhandled + // #test-unhandled import akka.testkit.TestActorRef system.eventStream.subscribe(testActor, classOf[UnhandledMessage]) val ref = TestActorRef[MyActor] ref.receive(Unknown) expectMsg(1 second, UnhandledMessage(Unknown, system.deadLetters, ref)) - //#test-unhandled + // #test-unhandled } "demonstrate expecting exceptions" in { - //#test-expecting-exceptions + // #test-expecting-exceptions import akka.testkit.TestActorRef val actorRef = TestActorRef(new Actor { @@ -195,12 +195,12 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } }) intercept[IllegalArgumentException] { actorRef.receive("hello") } - //#test-expecting-exceptions + // #test-expecting-exceptions } "demonstrate within" in { type Worker = MyActor - //#test-within + // #test-within import akka.actor.Props import scala.concurrent.duration._ @@ -211,19 +211,19 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { expectNoMessage() // will block for the rest of the 200ms Thread.sleep(300) // will NOT make this block fail } - //#test-within + // #test-within } "demonstrate dilated duration" in { - //#duration-dilation + // #duration-dilation import scala.concurrent.duration._ import akka.testkit._ 10.milliseconds.dilated - //#duration-dilation + // #duration-dilation } "demonstrate usage of probe" in { - //#test-probe + // #test-probe val probe1 = TestProbe() val probe2 = TestProbe() val actor = system.actorOf(Props[MyDoubleEcho]()) @@ -231,9 +231,9 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! "hello" probe1.expectMsg(500 millis, "hello") probe2.expectMsg(500 millis, "hello") - //#test-probe + // #test-probe - //#test-special-probe + // #test-special-probe final case class Update(id: Int, value: String) val probe = new TestProbe(system) { @@ -244,58 +244,58 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { sender() ! "ACK" } } - //#test-special-probe + // #test-special-probe } "demonstrate usage of test probe with custom name" in { - //#test-probe-with-custom-name + // #test-probe-with-custom-name val worker = TestProbe("worker") val aggregator = TestProbe("aggregator") worker.ref.path.name should startWith("worker") aggregator.ref.path.name should startWith("aggregator") - //#test-probe-with-custom-name + // #test-probe-with-custom-name } "demonstrate probe watch" in { import akka.testkit.TestProbe val target = system.actorOf(Props.empty) - //#test-probe-watch + // #test-probe-watch val probe = TestProbe() probe.watch(target) target ! PoisonPill probe.expectTerminated(target) - //#test-probe-watch + // #test-probe-watch } "demonstrate probe reply" in { import akka.testkit.TestProbe import scala.concurrent.duration._ import akka.pattern.ask - //#test-probe-reply + // #test-probe-reply val probe = TestProbe() val future = probe.ref ? "hello" probe.expectMsg(0 millis, "hello") // TestActor runs on CallingThreadDispatcher probe.reply("world") assert(future.isCompleted && future.value.contains(Success("world"))) - //#test-probe-reply + // #test-probe-reply } "demonstrate probe forward" in { import akka.testkit.TestProbe import akka.actor.Props - //#test-probe-forward + // #test-probe-forward val probe = TestProbe() val source = system.actorOf(Props(classOf[Source], probe.ref)) val dest = system.actorOf(Props[Destination]()) source ! 
"start" probe.expectMsg("work") probe.forward(dest) - //#test-probe-forward + // #test-probe-forward } "demonstrate using inheritance to test timers" in { - //#timer-test + // #timer-test import akka.testkit.TestProbe import akka.actor.Props @@ -307,18 +307,18 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! TriggerScheduling("abc") probe.expectMsg(ScheduledMessage("abc")) - //#timer-test + // #timer-test } "demonstrate calling thread dispatcher" in { - //#calling-thread-dispatcher + // #calling-thread-dispatcher import akka.testkit.CallingThreadDispatcher val ref = system.actorOf(Props[MyActor]().withDispatcher(CallingThreadDispatcher.Id)) - //#calling-thread-dispatcher + // #calling-thread-dispatcher } "demonstrate EventFilter" in { - //#event-filter + // #event-filter import akka.testkit.EventFilter import com.typesafe.config.ConfigFactory @@ -335,36 +335,36 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } finally { shutdown(system) } - //#event-filter + // #event-filter } "demonstrate TestKitBase" in { - //#test-kit-base + // #test-kit-base import akka.testkit.TestKitBase class MyTest extends TestKitBase { implicit lazy val system: ActorSystem = ActorSystem() - //#put-your-test-code-here + // #put-your-test-code-here val probe = TestProbe() probe.send(testActor, "hello") try expectMsg("hello") catch { case NonFatal(e) => system.terminate(); throw e } - //#put-your-test-code-here + // #put-your-test-code-here shutdown(system) } - //#test-kit-base + // #test-kit-base } "demonstrate within() nesting" in { intercept[AssertionError] { - //#test-within-probe + // #test-within-probe val probe = TestProbe() within(1 second) { probe.expectMsg("hello") } - //#test-within-probe + // #test-within-probe } } diff --git a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala index a7f1596ae5..16c98bd25c 100644 --- a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala +++ b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala @@ -165,17 +165,17 @@ class ActorHierarchyExperiments extends ScalaTestWithActorTestKit with AnyWordSp def context = this "start and stop actors" in { - //#start-stop-main + // #start-stop-main val first = context.spawn(StartStopActor1(), "first") first ! "stop" - //#start-stop-main + // #start-stop-main } "supervise actors" in { - //#supervise-main + // #supervise-main val supervisingActor = context.spawn(SupervisingActor(), "supervising-actor") supervisingActor ! 
"failChild" - //#supervise-main + // #supervise-main Thread.sleep(200) // allow for the println/logging to complete } } diff --git a/akka-docs/src/test/scala/typed/tutorial_3/Device.scala b/akka-docs/src/test/scala/typed/tutorial_3/Device.scala index 7c85b9a7a2..e7fcb551e2 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/Device.scala @@ -23,11 +23,11 @@ object Device { final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends Command final case class RespondTemperature(requestId: Long, value: Option[Double]) - //#write-protocol + // #write-protocol final case class RecordTemperature(requestId: Long, value: Double, replyTo: ActorRef[TemperatureRecorded]) extends Command final case class TemperatureRecorded(requestId: Long) - //#write-protocol + // #write-protocol } class Device(context: ActorContext[Device.Command], groupId: String, deviceId: String) diff --git a/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala b/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala index c079546f05..c093320ea2 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala @@ -16,7 +16,7 @@ import akka.actor.typed.Signal object DeviceInProgress1 { - //#read-protocol-1 + // #read-protocol-1 import akka.actor.typed.ActorRef object Device { @@ -24,14 +24,14 @@ object DeviceInProgress1 { final case class ReadTemperature(replyTo: ActorRef[RespondTemperature]) extends Command final case class RespondTemperature(value: Option[Double]) } - //#read-protocol-1 + // #read-protocol-1 } object DeviceInProgress2 { import akka.actor.typed.ActorRef - //#device-with-read + // #device-with-read import akka.actor.typed.Behavior import akka.actor.typed.scaladsl.AbstractBehavior import akka.actor.typed.scaladsl.ActorContext @@ -42,11 +42,11 @@ object DeviceInProgress2 { def apply(groupId: String, deviceId: String): Behavior[Command] = Behaviors.setup(context => new Device(context, groupId, deviceId)) - //#read-protocol-2 + // #read-protocol-2 sealed trait Command final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends Command final case class RespondTemperature(requestId: Long, value: Option[Double]) - //#read-protocol-2 + // #read-protocol-2 } class Device(context: ActorContext[Device.Command], groupId: String, deviceId: String) @@ -72,16 +72,16 @@ object DeviceInProgress2 { } } - //#device-with-read + // #device-with-read } object DeviceInProgress3 { object Device { - //#write-protocol-1 + // #write-protocol-1 sealed trait Command final case class RecordTemperature(value: Double) extends Command - //#write-protocol-1 + // #write-protocol-1 } } diff --git a/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala b/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala index 7293da291c..96b5478aeb 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala @@ -22,9 +22,9 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response.requestId should ===(42) response.value should ===(None) } - //#device-read-test + // #device-read-test - //#device-write-read-test + // #device-write-read-test "reply with latest temperature reading" in { val recordProbe = createTestProbe[TemperatureRecorded]() val readProbe = createTestProbe[RespondTemperature]() @@ -46,9 +46,9 @@ class 
DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response2.requestId should ===(4) response2.value should ===(Some(55.0)) } - //#device-write-read-test + // #device-write-read-test } - //#device-read-test + // #device-read-test } //#device-read-test diff --git a/akka-docs/src/test/scala/typed/tutorial_4/Device.scala b/akka-docs/src/test/scala/typed/tutorial_4/Device.scala index 0afd2dfe64..5895d2ad15 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/Device.scala @@ -27,9 +27,9 @@ object Device { extends Command final case class TemperatureRecorded(requestId: Long) - //#passivate-msg + // #passivate-msg case object Passivate extends Command - //#passivate-msg + // #passivate-msg } class Device(context: ActorContext[Device.Command], groupId: String, deviceId: String) diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala index e0f8dc7cd9..5273ece13c 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala @@ -47,9 +47,9 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) case None => context.log.info("Creating device actor for {}", trackMsg.deviceId) val deviceActor = context.spawn(Device(groupId, deviceId), s"device-$deviceId") - //#device-group-register + // #device-group-register context.watchWith(deviceActor, DeviceTerminated(deviceActor, groupId, deviceId)) - //#device-group-register + // #device-group-register deviceIdToActor += deviceId -> deviceActor replyTo ! DeviceRegistered(deviceActor) } @@ -58,8 +58,8 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) case RequestTrackDevice(gId, _, _) => context.log.warn2("Ignoring TrackDevice request for {}. This actor is responsible for {}.", gId, groupId) this - //#device-group-register - //#device-group-remove + // #device-group-register + // #device-group-remove case RequestDeviceList(requestId, gId, replyTo) => if (gId == groupId) { @@ -67,14 +67,14 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) this } else Behaviors.unhandled - //#device-group-remove + // #device-group-remove case DeviceTerminated(_, _, deviceId) => context.log.info("Device actor for {} has been terminated", deviceId) deviceIdToActor -= deviceId this - //#device-group-register + // #device-group-register } override def onSignal: PartialFunction[Signal, Behavior[Command]] = { diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala index cd62bf4948..cefcf1ec2b 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala @@ -14,7 +14,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { "DeviceGroup actor" must { - //#device-group-test-registration + // #device-group-test-registration "be able to register a device actor" in { val probe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -44,9 +44,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { groupActor ! 
RequestTrackDevice("wrongGroup", "device1", probe.ref) probe.expectNoMessage(500.milliseconds) } - //#device-group-test-registration + // #device-group-test-registration - //#device-group-test3 + // #device-group-test3 "return same actor for same deviceId" in { val probe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -60,9 +60,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { registered1.device should ===(registered2.device) } - //#device-group-test3 + // #device-group-test3 - //#device-group-list-terminate-test + // #device-group-list-terminate-test "be able to list active devices" in { val registeredProbe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -103,7 +103,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { deviceListProbe.expectMessage(ReplyDeviceList(requestId = 1, Set("device2"))) } } - //#device-group-list-terminate-test + // #device-group-list-terminate-test } diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala index b13653c01a..67de161e7e 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala @@ -17,28 +17,28 @@ object DeviceManager { def apply(): Behavior[Command] = Behaviors.setup(context => new DeviceManager(context)) - //#device-manager-msgs + // #device-manager-msgs sealed trait Command - //#device-registration-msgs + // #device-registration-msgs final case class RequestTrackDevice(groupId: String, deviceId: String, replyTo: ActorRef[DeviceRegistered]) extends DeviceManager.Command with DeviceGroup.Command final case class DeviceRegistered(device: ActorRef[Device.Command]) - //#device-registration-msgs + // #device-registration-msgs - //#device-list-msgs + // #device-list-msgs final case class RequestDeviceList(requestId: Long, groupId: String, replyTo: ActorRef[ReplyDeviceList]) extends DeviceManager.Command with DeviceGroup.Command final case class ReplyDeviceList(requestId: Long, ids: Set[String]) - //#device-list-msgs + // #device-list-msgs private final case class DeviceGroupTerminated(groupId: String) extends DeviceManager.Command - //#device-manager-msgs + // #device-manager-msgs } class DeviceManager(context: ActorContext[DeviceManager.Command]) diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala index 93e768293a..b83ffbdb43 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala @@ -12,7 +12,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { "Device actor" must { - //#device-read-test + // #device-read-test "reply with empty reading if no temperature is known" in { val probe = createTestProbe[RespondTemperature]() val deviceActor = spawn(Device("group", "device")) @@ -22,9 +22,9 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response.requestId should ===(42) response.value should ===(None) } - //#device-read-test + // #device-read-test - //#device-write-read-test + // #device-write-read-test "reply with latest temperature reading" in { val recordProbe = createTestProbe[TemperatureRecorded]() val readProbe = createTestProbe[RespondTemperature]() @@ -46,7 +46,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike 
       response2.requestId should ===(4)
       response2.value should ===(Some(55.0))
     }
-    //#device-write-read-test
+    // #device-write-read-test
   }
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/Device.scala b/akka-docs/src/test/scala/typed/tutorial_5/Device.scala
index 49332d2e3f..628cc8b88f 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/Device.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/Device.scala
@@ -21,9 +21,9 @@ object Device {
   sealed trait Command
   final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends Command
-  //#respond-declare
+  // #respond-declare
   final case class RespondTemperature(requestId: Long, deviceId: String, value: Option[Double])
-  //#respond-declare
+  // #respond-declare

   final case class RecordTemperature(requestId: Long, value: Double, replyTo: ActorRef[TemperatureRecorded]) extends Command
   final case class TemperatureRecorded(requestId: Long)
@@ -46,11 +46,11 @@ class Device(context: ActorContext[Device.Command], groupId: String, deviceId: S
         lastTemperatureReading = Some(value)
         replyTo ! TemperatureRecorded(id)
         this
-      //#respond-reply
+      // #respond-reply
       case ReadTemperature(id, replyTo) =>
         replyTo ! RespondTemperature(id, deviceId, lastTemperatureReading)
         this
-      //#respond-reply
+      // #respond-reply
       case Passivate =>
         Behaviors.stopped
     }
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala
index 6161847aa9..17a9de5e6e 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala
@@ -43,7 +43,7 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String)

   override def onMessage(msg: Command): Behavior[Command] =
     msg match {
-      //#query-added
+      // #query-added
       case trackMsg @ RequestTrackDevice(`groupId`, deviceId, replyTo) =>
         deviceIdToActor.get(deviceId) match {
           case Some(deviceActor) =>
@@ -51,9 +51,9 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String)
           case None =>
             context.log.info("Creating device actor for {}", trackMsg.deviceId)
             val deviceActor = context.spawn(Device(groupId, deviceId), s"device-$deviceId")
-            //#device-group-register
+            // #device-group-register
             context.watchWith(deviceActor, DeviceTerminated(deviceActor, groupId, deviceId))
-            //#device-group-register
+            // #device-group-register
             deviceIdToActor += deviceId -> deviceActor
             replyTo ! DeviceRegistered(deviceActor)
         }
@@ -69,14 +69,14 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String)
           this
         } else Behaviors.unhandled

-      //#device-group-remove
+      // #device-group-remove
       case DeviceTerminated(_, _, deviceId) =>
         context.log.info("Device actor for {} has been terminated", deviceId)
         deviceIdToActor -= deviceId
         this

-      //#query-added
+      // #query-added
       // ... other cases omitted
      case RequestAllTemperatures(requestId, gId, replyTo) =>
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala
index 9a49e454ff..fdf8a88981 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala
@@ -59,13 +59,13 @@ class DeviceGroupQuery(

   private val respondTemperatureAdapter = context.messageAdapter(WrappedRespondTemperature.apply)

-  //#query-outline
-  //#query-state
+  // #query-outline
+  // #query-state
   private var repliesSoFar = Map.empty[String, TemperatureReading]
   private var stillWaiting = deviceIdToActor.keySet

-  //#query-state
-  //#query-outline
+  // #query-state
+  // #query-outline

   deviceIdToActor.foreach {
     case (deviceId, device) =>
@@ -73,8 +73,8 @@ class DeviceGroupQuery(
       device ! Device.ReadTemperature(0, respondTemperatureAdapter)
   }

-  //#query-outline
-  //#query-state
+  // #query-outline
+  // #query-state
   override def onMessage(msg: Command): Behavior[Command] =
     msg match {
       case WrappedRespondTemperature(response) => onRespondTemperature(response)
@@ -108,9 +108,9 @@ class DeviceGroupQuery(
     stillWaiting = Set.empty
     respondWhenAllCollected()
   }
-  //#query-state
+  // #query-state

-  //#query-collect-reply
+  // #query-collect-reply
   private def respondWhenAllCollected(): Behavior[Command] = {
     if (stillWaiting.isEmpty) {
       requester ! RespondAllTemperatures(requestId, repliesSoFar)
@@ -119,8 +119,8 @@ class DeviceGroupQuery(
       this
     }
   }
-  //#query-collect-reply
-  //#query-outline
+  // #query-collect-reply
+  // #query-outline
 }
 //#query-outline
 //#query-full
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala
index da3828983c..8a10b5fc2e 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala
@@ -19,7 +19,7 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik

   "DeviceGroupQuery" must {

-    //#query-test-normal
+    // #query-test-normal
     "return temperature value for working devices" in {
       val requester = createTestProbe[RespondAllTemperatures]()
@@ -42,9 +42,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik
           requestId = 1,
           temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0))))
     }
-    //#query-test-normal
+    // #query-test-normal

-    //#query-test-no-reading
+    // #query-test-no-reading
     "return TemperatureNotAvailable for devices with no readings" in {
       val requester = createTestProbe[RespondAllTemperatures]()
@@ -67,9 +67,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik
           requestId = 1,
           temperatures = Map("device1" -> TemperatureNotAvailable, "device2" -> Temperature(2.0))))
     }
-    //#query-test-no-reading
+    // #query-test-no-reading

-    //#query-test-stopped
+    // #query-test-stopped
     "return DeviceNotAvailable if device stops before answering" in {
       val requester = createTestProbe[RespondAllTemperatures]()
@@ -93,9 +93,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik
           requestId = 1,
           temperatures = Map("device1" -> Temperature(2.0), "device2" -> DeviceNotAvailable)))
     }
-    //#query-test-stopped
+    // #query-test-stopped

-    //#query-test-stopped-later
+    // #query-test-stopped-later
     "return temperature reading even if device stops after answering" in {
       val requester = createTestProbe[RespondAllTemperatures]()
@@ -120,9 +120,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik
           requestId = 1,
           temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0))))
     }
-    //#query-test-stopped-later
+    // #query-test-stopped-later

-    //#query-test-timeout
+    // #query-test-timeout
     "return DeviceTimedOut if device does not answer in time" in {
       val requester = createTestProbe[RespondAllTemperatures]()
@@ -146,7 +146,7 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik
           requestId = 1,
           temperatures = Map("device1" -> Temperature(1.0), "device2" -> DeviceTimedOut)))
     }
-    //#query-test-timeout
+    // #query-test-timeout
   }
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala
index ca20e70423..d64363dc41 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala
@@ -14,7 +14,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {

   "DeviceGroup actor" must {

-    //#device-group-test-registration
+    // #device-group-test-registration
     "be able to register a device actor" in {
       val probe = createTestProbe[DeviceRegistered]()
       val groupActor = spawn(DeviceGroup("group"))
@@ -44,9 +44,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {
       groupActor ! RequestTrackDevice("wrongGroup", "device1", probe.ref)
       probe.expectNoMessage(500.milliseconds)
     }
-    //#device-group-test-registration
+    // #device-group-test-registration

-    //#device-group-test3
+    // #device-group-test3
     "return same actor for same deviceId" in {
       val probe = createTestProbe[DeviceRegistered]()
       val groupActor = spawn(DeviceGroup("group"))
@@ -60,9 +60,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {
       registered1.device should ===(registered2.device)
     }
-    //#device-group-test3
+    // #device-group-test3

-    //#device-group-list-terminate-test
+    // #device-group-list-terminate-test
     "be able to list active devices" in {
       val registeredProbe = createTestProbe[DeviceRegistered]()
       val groupActor = spawn(DeviceGroup("group"))
@@ -103,9 +103,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {
       deviceListProbe.expectMessage(ReplyDeviceList(requestId = 1, Set("device2")))
     }
   }
-  //#device-group-list-terminate-test
+  // #device-group-list-terminate-test

-  //#group-query-integration-test
+  // #group-query-integration-test
   "be able to collect temperatures from all active devices" in {
     val registeredProbe = createTestProbe[DeviceRegistered]()
     val groupActor = spawn(DeviceGroup("group"))
@@ -135,7 +135,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {
         temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0), "device3" -> TemperatureNotAvailable)))
   }
-  //#group-query-integration-test
+  // #group-query-integration-test
 }
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala
index 15153de1ad..a5abbab9e9 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala
@@ -17,7 +17,7 @@ object DeviceManager {
   def apply(): Behavior[Command] =
     Behaviors.setup(context => new DeviceManager(context))

-  //#device-manager-msgs
+  // #device-manager-msgs
   sealed trait Command
@@ -34,9 +34,9 @@ object DeviceManager {
   final case class ReplyDeviceList(requestId: Long, ids: Set[String])

   private final case class DeviceGroupTerminated(groupId: String) extends DeviceManager.Command
-  //#device-manager-msgs
+  // #device-manager-msgs

-  //#query-protocol
+  // #query-protocol
   final case class RequestAllTemperatures(requestId: Long, groupId: String, replyTo: ActorRef[RespondAllTemperatures])
       extends DeviceGroupQuery.Command
@@ -50,7 +50,7 @@ object DeviceManager {
   case object TemperatureNotAvailable extends TemperatureReading
   case object DeviceNotAvailable extends TemperatureReading
   case object DeviceTimedOut extends TemperatureReading
-  //#query-protocol
+  // #query-protocol
 }

 class DeviceManager(context: ActorContext[DeviceManager.Command])
diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala
index 7a0f4c0a9c..b828403941 100644
--- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala
+++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala
@@ -12,7 +12,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {

   "Device actor" must {

-    //#device-read-test
+    // #device-read-test
     "reply with empty reading if no temperature is known" in {
       val probe = createTestProbe[RespondTemperature]()
       val deviceActor = spawn(Device("group", "device"))
@@ -22,9 +22,9 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {
       response.requestId should ===(42)
       response.value should ===(None)
     }
-    //#device-read-test
+    // #device-read-test

-    //#device-write-read-test
+    // #device-write-read-test
     "reply with latest temperature reading" in {
       val recordProbe = createTestProbe[TemperatureRecorded]()
       val readProbe = createTestProbe[RespondTemperature]()
@@ -46,7 +46,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {
       response2.requestId should ===(4)
       response2.value should ===(Some(55.0))
     }
-    //#device-write-read-test
+    // #device-write-read-test
   }
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
index 320453a3ca..bb61b08ebe 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala
@@ -498,7 +498,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP
         case GetAddress(node) =>
           if (nodes contains node) sender() ! ToClient(AddressReply(node, nodes(node).addr))
           else addrInterest += node -> ((addrInterest.get(node).getOrElse(Set())) + sender())
-        case _: Done => //FIXME what should happen?
+        case _: Done => // FIXME what should happen?
       }
     case op: CommandOp =>
       op match {
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala
index aacda59dc6..017ef28d29 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala
@@ -51,7 +51,6 @@ object TestConductor extends ExtensionId[TestConductorExt] with ExtensionIdProvi
  * To use ``blackhole``, ``passThrough``, and ``throttle`` you must activate the
  * failure injector and throttler transport adapters by specifying `testTransport(on = true)`
  * in your MultiNodeConfig.
- *
  */
 class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player {
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
index 4ff65b9f8d..d97d9d5ebe 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala
@@ -275,7 +275,7 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress)
           case true => self ! ToServer(Done)
           case _ =>
             throw new RuntimeException("Throttle was requested from the TestConductor, but no transport " +
-              "adapters available that support throttling. Specify `testTransport(on = true)` in your MultiNodeConfig")
+                "adapters available that support throttling. Specify `testTransport(on = true)` in your MultiNodeConfig")
         }
         stay()
       case _: DisconnectMsg =>
@@ -290,7 +290,7 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress)
       case TerminateMsg(Right(exitValue)) =>
         System.exit(exitValue)
         stay() // needed because Java doesn’t have Nothing
-      case _: Done => stay() //FIXME what should happen?
+      case _: Done => stay() // FIXME what should happen?
     }
   }
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
index 2b0deb2d80..9e0e470062 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala
@@ -116,8 +116,9 @@ private[akka] object RemoteConnection {
   def shutdown(channel: Channel): Unit = {
     try {
       try channel.close()
-      finally try channel.getFactory.shutdown()
-      finally channel.getFactory.releaseExternalResources()
+      finally
+        try channel.getFactory.shutdown()
+        finally channel.getFactory.releaseExternalResources()
     } catch {
       case NonFatal(_) =>
         // silence this one to not make tests look like they failed, it's not really critical
diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
index 93e87c5456..f34f75784a 100644
--- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
+++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala
@@ -114,8 +114,8 @@ abstract class MultiNodeConfig {
       else ConfigFactory.empty

     val configs = _nodeConf
-      .get(myself)
-      .toList ::: _commonConf.toList ::: transportConfig :: MultiNodeSpec.nodeConfig :: MultiNodeSpec.baseConfig :: Nil
+        .get(myself)
+        .toList ::: _commonConf.toList ::: transportConfig :: MultiNodeSpec.nodeConfig :: MultiNodeSpec.baseConfig :: Nil
     configs.reduceLeft(_.withFallback(_))
   }
@@ -318,17 +318,17 @@ abstract class MultiNodeSpec(
   def this(config: MultiNodeConfig) =
     this(config, {
-      val name = TestKitUtils.testNameFromCallStack(classOf[MultiNodeSpec], "".r)
-      config =>
-        try {
-          ActorSystem(name, config)
-        } catch {
-          // Retry creating the system once as when using port = 0 two systems may try and use the same one.
-          // RTE is for aeron, CE for netty
-          case _: RemoteTransportException => ActorSystem(name, config)
-          case _: ChannelException => ActorSystem(name, config)
-        }
-      })
+      val name = TestKitUtils.testNameFromCallStack(classOf[MultiNodeSpec], "".r)
+      config =>
+        try {
+          ActorSystem(name, config)
+        } catch {
+          // Retry creating the system once as when using port = 0 two systems may try and use the same one.
+          // RTE is for aeron, CE for netty
+          case _: RemoteTransportException => ActorSystem(name, config)
+          case _: ChannelException => ActorSystem(name, config)
+        }
+      })

   val log: LoggingAdapter = Logging(system, this)(_.getClass.getName)
@@ -351,9 +351,9 @@ abstract class MultiNodeSpec(
     testConductor.removeNode(myself)
     within(testConductor.Settings.BarrierTimeout.duration) {
       awaitCond({
-        // Await.result(testConductor.getNodes, remaining).filterNot(_ == myself).isEmpty
-        testConductor.getNodes.await.forall(_ == myself)
-      }, message = s"Nodes not shutdown: ${testConductor.getNodes.await}")
+          // Await.result(testConductor.getNodes, remaining).filterNot(_ == myself).isEmpty
+          testConductor.getNodes.await.forall(_ == myself)
+        }, message = s"Nodes not shutdown: ${testConductor.getNodes.await}")
     }
   }
   shutdown(system, duration = shutdownTimeout)
@@ -498,16 +498,17 @@ abstract class MultiNodeSpec(
       base.indexOf(tag) match {
         case -1 => base
         case _ =>
-          val replaceWith = try r.addr
-          catch {
-            case NonFatal(e) =>
-              // might happen if all test cases are ignored (excluded) and
-              // controller node is finished/exited before r.addr is run
-              // on the other nodes
-              val unresolved = "akka://unresolved-replacement-" + r.role.name
-              log.warning(unresolved + " due to: " + e.getMessage)
-              unresolved
-          }
+          val replaceWith =
+            try r.addr
+            catch {
+              case NonFatal(e) =>
+                // might happen if all test cases are ignored (excluded) and
+                // controller node is finished/exited before r.addr is run
+                // on the other nodes
+                val unresolved = "akka://unresolved-replacement-" + r.role.name
+                log.warning(unresolved + " due to: " + e.getMessage)
+                unresolved
+            }
           base.replace(tag, replaceWith)
       }
     }
diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala
index a8a87e7396..f3e8f0a572 100644
--- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala
+++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala
@@ -69,7 +69,7 @@ abstract class ActorSystemActivator extends BundleActivator {
     val filter = s"(objectclass=${classOf[LogService].getName})"
     context.addServiceListener(logServiceListner, filter)

-    //Small trick to create an event if the service is registered before this start listing for
+    // Small trick to create an event if the service is registered before this start listing for
     Option(context.getServiceReference(classOf[LogService].getName)).foreach(x => {
       logServiceListner.serviceChanged(new ServiceEvent(ServiceEvent.REGISTERED, x))
     })
@@ -101,7 +101,7 @@ abstract class ActorSystemActivator extends BundleActivator {
    * @param system the actor system
    */
   def registerService(context: BundleContext, system: ActorSystem): Unit = {
-    registration.foreach(_.unregister()) //Cleanup
+    registration.foreach(_.unregister()) // Cleanup
     val properties = new Properties()
     properties.put("name", system.name)
     registration = Some(
diff --git a/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala b/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala
index 2399a6c49c..bedbc51f54 100644
--- a/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala
+++ b/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala
@@ -27,7 +27,7 @@ class DefaultOSGiLogger extends DefaultLogger {
    */
   def uninitialisedReceive: Receive = {
     var messagesToLog: Vector[LogEvent] = Vector()
-    //the Default Logger needs to be aware of the LogService which is published on the EventStream
+    // the Default Logger needs to be aware of the LogService which is published on the EventStream
     context.system.eventStream.subscribe(self, classOf[LogService])
     context.system.eventStream.unsubscribe(self, UnregisteringLogService.getClass)
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala
index 1ac15f1212..6d1e24878d 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala
@@ -36,7 +36,6 @@ object UpdatedDurableState {
 }

 /**
- *
  * @param persistenceId The persistence id of the origin entity.
  * @param revision The revision number from the origin entity.
  * @param value The object value.
@@ -60,7 +59,6 @@ object DeletedDurableState {
 }

 /**
- *
  * @param persistenceId The persistence id of the origin entity.
  * @param revision The revision number from the origin entity.
  * @param offset The offset that can be used in next `changes` or `currentChanges` query.
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala
index 8f5beaa493..bde92061e3 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala
@@ -15,7 +15,6 @@ package akka.persistence.query
  * and even though those types can easily be converted to each other it is most convenient
  * for the end user to get access to the Java or Scala `Source` directly.
  * One of the implementations can delegate to the other.
- *
  */
 trait ReadJournalProvider {
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala
index cd70947819..b317ab27a3 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala
@@ -23,7 +23,6 @@ import akka.stream.javadsl.Source
 * Configuration settings can be defined in the configuration section with the
 * absolute path corresponding to the identifier, which is `"akka.persistence.query.journal.leveldb"`
 * for the default [[LeveldbReadJournal#Identifier]]. See `reference.conf`.
- *
  */
 @deprecated("Use another journal implementation", "2.6.15")
 class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal)
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala
index dec65f1ea3..30fe7fafa9 100644
--- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala
+++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala
@@ -236,7 +236,7 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config)
             mat))
       .named("eventsByTag-" + URLEncoder.encode(tag, ByteString.UTF_8))

-    case NoOffset => eventsByTag(tag, Sequence(0L)) //recursive
+    case NoOffset => eventsByTag(tag, Sequence(0L)) // recursive
     case _ =>
       throw new IllegalArgumentException(
         "LevelDB does not support " + Logging.simpleName(offset.getClass) + " offsets")
diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala
index a76ee5433a..553564ddd1 100644
--- a/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala
+++ b/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala
@@ -16,9 +16,9 @@ class OffsetSpec extends AnyWordSpecLike with Matchers {

   "TimeBasedUUID offset" must {
     "be ordered correctly" in {
-      val uuid1 = TimeBasedUUID(UUID.fromString("49225740-2019-11ea-a752-ffae2393b6e4")) //2019-12-16T15:32:36.148Z[UTC]
-      val uuid2 = TimeBasedUUID(UUID.fromString("91be23d0-2019-11ea-a752-ffae2393b6e4")) //2019-12-16T15:34:37.965Z[UTC]
-      val uuid3 = TimeBasedUUID(UUID.fromString("91f95810-2019-11ea-a752-ffae2393b6e4")) //2019-12-16T15:34:38.353Z[UTC]
+      val uuid1 = TimeBasedUUID(UUID.fromString("49225740-2019-11ea-a752-ffae2393b6e4")) // 2019-12-16T15:32:36.148Z[UTC]
+      val uuid2 = TimeBasedUUID(UUID.fromString("91be23d0-2019-11ea-a752-ffae2393b6e4")) // 2019-12-16T15:34:37.965Z[UTC]
+      val uuid3 = TimeBasedUUID(UUID.fromString("91f95810-2019-11ea-a752-ffae2393b6e4")) // 2019-12-16T15:34:38.353Z[UTC]
       uuid1.value.timestamp() should be < uuid2.value.timestamp()
       uuid2.value.timestamp() should be < uuid3.value.timestamp()
       List(uuid2, uuid1, uuid3).sorted shouldEqual List(uuid1, uuid2, uuid3)
diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala
index f29b8ca896..ed8506046c 100644
--- a/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala
+++ b/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala
@@ -60,7 +60,7 @@ class QuerySerializerSpec extends AkkaSpec {
   }

   "serialize EventEnvelope with TimeBasedUUID Offset" in {
-    //2019-12-16T15:32:36.148Z[UTC]
+    // 2019-12-16T15:32:36.148Z[UTC]
     val uuidString = "49225740-2019-11ea-a752-ffae2393b6e4"
     val timeUuidOffset = TimeBasedUUID(UUID.fromString(uuidString))
     verifySerialization(
@@ -79,7 +79,7 @@ class QuerySerializerSpec extends AkkaSpec {
   }

   "serialize TimeBasedUUID Offset" in {
-    //2019-12-16T15:32:36.148Z[UTC]
+    // 2019-12-16T15:32:36.148Z[UTC]
     val uuidString = "49225740-2019-11ea-a752-ffae2393b6e4"
     val timeUuidOffset = TimeBasedUUID(UUID.fromString(uuidString))
     verifySerialization(timeUuidOffset)
diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala
index c899c8e957..b077b075a4 100644
--- a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala
+++ b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala
@@ -61,13 +61,13 @@ object PersistencePluginProxySpec {
       |akka.extensions = ["akka.persistence.Persistence"]
       |akka.persistence.journal.auto-start-journals = [""]
       |akka.persistence.journal.proxy.target-journal-address = "${system
-      .asInstanceOf[ExtendedActorSystem]
-      .provider
-      .getDefaultAddress}"
+        .asInstanceOf[ExtendedActorSystem]
+        .provider
+        .getDefaultAddress}"
       |akka.persistence.snapshot-store.proxy.target-snapshot-store-address = "${system
-      .asInstanceOf[ExtendedActorSystem]
-      .provider
-      .getDefaultAddress}"
+        .asInstanceOf[ExtendedActorSystem]
+        .provider
+        .getDefaultAddress}"
       """.stripMargin)

   class ExamplePersistentActor(probe: ActorRef, name: String) extends NamedPersistentActor(name) {
diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala
index 39d7c964e6..5c3ce8f891 100644
--- a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala
+++ b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala
@@ -93,7 +93,8 @@ class SharedLeveldbJournalSpec extends AkkaSpec(SharedLeveldbJournalSpec.config)
       @nowarn
       val sharedLeveldbStoreCls = classOf[SharedLeveldbStore]
       system.actorOf(Props(sharedLeveldbStoreCls, storeConfig), "store")
-      val storePath = RootActorPath(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress) / "user" / "store"
+      val storePath =
+        RootActorPath(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress) / "user" / "store"

       val appA = systemA.actorOf(Props(classOf[ExampleApp], probeA.ref, storePath))
       val appB = systemB.actorOf(Props(classOf[ExampleApp], probeB.ref, storePath))
diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala b/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala
index 3f1e0085e8..74c3fb2077 100644
--- a/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala
+++ b/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala
@@ -7,7 +7,7 @@ package akka.persistence
 import scala.language.implicitConversions

 sealed abstract class CapabilityFlag {
-  private val capturedStack = (new Throwable().getStackTrace)
+  private val capturedStack = new Throwable().getStackTrace
     .filter(_.getMethodName.startsWith("supports"))
     .find { el =>
       val clazz = Class.forName(el.getClassName)
diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala
index e9fd818325..253b4a0624 100644
--- a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala
+++ b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala
@@ -335,8 +335,8 @@ abstract class JournalSpec(config: Config)
       val WriterUuid = writerUuid
       probe.expectMsgPF() {
         case WriteMessageSuccess(
-            PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid, _, Some(`meta`)),
-            _) =>
+              PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid, _, Some(`meta`)),
+              _) =>
           payload should be(event)
       }
diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala
index e697ba2119..41db0c36bc 100644
--- a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala
+++ b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala
@@ -215,8 +215,8 @@ abstract class SnapshotStoreSpec(config: Config)
       snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, Long.MaxValue), senderProbe.ref)
       senderProbe.expectMsgPF() {
         case LoadSnapshotResult(
-            Some(SelectedSnapshot(meta @ SnapshotMetadata(Pid, 100, _), payload)),
-            Long.MaxValue) =>
+              Some(SelectedSnapshot(meta @ SnapshotMetadata(Pid, 100, _), payload)),
+              Long.MaxValue) =>
           payload should be(snap)
           meta.metadata should ===(Some(fictionalMeta))
       }
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala
index e3c6ae0cb2..41b05e6ee7 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala
@@ -33,7 +33,7 @@ private[testkit] trait EventStorage extends TestKitStorage[JournalOperation, Per
     // and therefore must be done at the same time with the update, not before
     updateOrSetNew(key, v => v ++ mapAny(key, elems).toVector)

-  override def reprToSeqNum(repr: (PersistentRepr)): Long = repr.sequenceNr
+  override def reprToSeqNum(repr: PersistentRepr): Long = repr.sequenceNr

   def add(elems: immutable.Seq[PersistentRepr]): Unit =
     elems.groupBy(_.persistenceId).foreach { gr =>
@@ -50,10 +50,11 @@ private[testkit] trait EventStorage extends TestKitStorage[JournalOperation, Per

     val processed = grouped.map {
       case (pid, els) =>
-        currentPolicy.tryProcess(pid, WriteEvents(els.map(_.payload match {
-          case Tagged(payload, _) => payload
-          case nonTagged => nonTagged
-        })))
+        currentPolicy.tryProcess(pid,
+          WriteEvents(els.map(_.payload match {
+            case Tagged(payload, _) => payload
+            case nonTagged => nonTagged
+          })))
     }

     val reduced: ProcessingResult =
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala
index 392e172356..f65950e48f 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala
@@ -102,21 +102,22 @@ object ProcessingPolicy {
       returnNonTrigger: => ProcessingResult,
       cond: (String, U) => Boolean,
       onLimitExceed: => Unit)
-      extends ReturnAfterNextNCond(returnOnTrigger, returnNonTrigger, new Function2[String, U, Boolean] {
+      extends ReturnAfterNextNCond(returnOnTrigger, returnNonTrigger,
+        new Function2[String, U, Boolean] {

-        var counter = 0
+          var counter = 0

-        override def apply(persistenceId: String, v1: U): Boolean = {
-          val intRes = cond(persistenceId, v1)
-          if (intRes && counter < numberToCount) {
-            counter += 1
-            if (counter == numberToCount) onLimitExceed
-            intRes
-          } else {
-            false
+          override def apply(persistenceId: String, v1: U): Boolean = {
+            val intRes = cond(persistenceId, v1)
+            if (intRes && counter < numberToCount) {
+              counter += 1
+              if (counter == numberToCount) onLimitExceed
+              intRes
+            } else {
+              false
+            }
           }
-        }
-      })
+        })
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala
index d56af53886..925594ca48 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala
@@ -28,7 +28,8 @@ private[testkit] trait SnapshotStorage
   override protected val DefaultPolicy = SnapshotPolicies.PassAll

   def tryAdd(meta: SnapshotMetadata, payload: Any): Unit = {
-    currentPolicy.tryProcess(meta.persistenceId, WriteSnapshot(SnapshotMeta(meta.sequenceNr, meta.timestamp), payload)) match {
+    currentPolicy.tryProcess(meta.persistenceId,
+      WriteSnapshot(SnapshotMeta(meta.sequenceNr, meta.timestamp), payload)) match {
       case ProcessingSuccess =>
         add(meta.persistenceId, (meta, payload))
         Success(())
@@ -56,7 +57,8 @@ private[testkit] trait SnapshotStorage
   }

   def tryDelete(meta: SnapshotMetadata): Unit = {
-    currentPolicy.tryProcess(meta.persistenceId, DeleteSnapshotByMeta(SnapshotMeta(meta.sequenceNr, meta.timestamp))) match {
+    currentPolicy.tryProcess(meta.persistenceId,
+      DeleteSnapshotByMeta(SnapshotMeta(meta.sequenceNr, meta.timestamp))) match {
       case ProcessingSuccess =>
         delete(meta.persistenceId, _._1.sequenceNr == meta.sequenceNr)
       case f: ProcessingFailure => throw f.error
@@ -99,7 +101,6 @@ case object SnapshotMeta {

 sealed trait SnapshotOperation

 /**
- *
  * Storage read operation for recovery of the persistent actor.
  *
  * @param criteria criteria with which snapshot is searched
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala
index 5f01bcd548..d4557fbf16 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala
@@ -89,7 +89,7 @@ import akka.stream.scaladsl.Sink
   private def system: ActorSystem[_] = actorTestKit.system

   if (system.settings.config.getBoolean("akka.persistence.testkit.events.serialize") ||
-    system.settings.config.getBoolean("akka.persistence.testkit.snapshots.serialize")) {
+      system.settings.config.getBoolean("akka.persistence.testkit.snapshots.serialize")) {
     system.log.warn(
       "Persistence TestKit serialization enabled when using EventSourcedBehaviorTestKit, this is not intended. " +
       "make sure you create the system used in the test with the config from EventSourcedBehaviorTestKit.config " +
@@ -147,14 +147,15 @@ import akka.stream.scaladsl.Sink
     actor ! command

-    val reply = try {
-      replyProbe.receiveMessage()
-    } catch {
-      case NonFatal(_) =>
-        throw new AssertionError(s"Missing expected reply for command [$command].")
-    } finally {
-      replyProbe.stop()
-    }
+    val reply =
+      try {
+        replyProbe.receiveMessage()
+      } catch {
+        case NonFatal(_) =>
+          throw new AssertionError(s"Missing expected reply for command [$command].")
+      } finally {
+        replyProbe.stop()
+      }

     val newState = getState()
     val newEvents = getEvents(seqNrBefore + 1)
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala
index c46989b1eb..2e31507ed9 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala
@@ -46,13 +46,14 @@ final class InMemStorageExtension(system: ExtendedActorSystem) extends Extension
   def resetPolicy(): Unit = defaultStorage().resetPolicy()

   def storageFor(key: String): EventStorage =
-    stores.computeIfAbsent(key, _ => {
-      // we don't really care about the key here, we just want separate instances
-      if (PersistenceTestKit.Settings(system).serialize) {
-        new SerializedEventStorageImpl(system)
-      } else {
-        new SimpleEventStorageImpl
-      }
-    })
+    stores.computeIfAbsent(key,
+      _ => {
+        // we don't really care about the key here, we just want separate instances
+        if (PersistenceTestKit.Settings(system).serialize) {
+          new SerializedEventStorageImpl(system)
+        } else {
+          new SimpleEventStorageImpl
+        }
+      })

 }
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala
index 3d94d37cf6..5c7cfbe231 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala
@@ -41,11 +41,10 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] {
   def reprToSeqNum(repr: R): Long

   def findMany(key: K, fromInclusive: Int, maxNum: Int): Option[Vector[R]] =
-    read(key).flatMap(
-      value =>
-        if (value.size > fromInclusive)
-          Some(value.drop(fromInclusive).take(maxNum))
-        else None)
+    read(key).flatMap(value =>
+      if (value.size > fromInclusive)
+        Some(value.drop(fromInclusive).take(maxNum))
+      else None)

   def removeFirstInExpectNextQueue(key: K): Unit = lock.synchronized {
     expectNextQueue.get(key).foreach { item =>
@@ -149,9 +148,10 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] {
   }

   def deleteToSeqNumber(key: K, toSeqNumberInclusive: Long): Unit =
-    updateOrSetNew(key, value => {
-      value.dropWhile(reprToSeqNum(_) <= toSeqNumberInclusive)
-    })
+    updateOrSetNew(key,
+      value => {
+        value.dropWhile(reprToSeqNum(_) <= toSeqNumberInclusive)
+      })

   def clearAllPreservingSeqNumbers(): Unit = lock.synchronized {
     eventsMap.keys.foreach(removePreservingSeqNumber)
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala
index d974f738d9..6a01ed5182 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala
@@ -50,10 +50,12 @@ final private[akka] class EventsByPersistenceIdStage(
       log.debug("tryPush available. Query for {} {} result {}", currentSequenceNr, currentSequenceNr, event)
       event.headOption match {
         case Some(pr) =>
-          push(out, EventEnvelope(Sequence(pr.sequenceNr), pr.persistenceId, pr.sequenceNr, pr.payload match {
-            case Tagged(payload, _) => payload
-            case payload => payload
-          }, pr.timestamp, pr.metadata))
+          push(out,
+            EventEnvelope(Sequence(pr.sequenceNr), pr.persistenceId, pr.sequenceNr,
+              pr.payload match {
+                case Tagged(payload, _) => payload
+                case payload => payload
+              }, pr.timestamp, pr.metadata))
           if (currentSequenceNr == toSequenceNr) {
             completeStage()
           } else {
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala
index b64c904828..722aa9d112 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala
@@ -107,11 +107,12 @@ final class PersistenceTestKitReadJournal(system: ExtendedActorSystem, @unused c
       case _ =>
         throw new UnsupportedOperationException("Offsets not supported for persistence test kit currentEventsByTag yet")
     }
-    val prs = storage.tryRead(entityType, repr => {
-      val pid = repr.persistenceId
-      val slice = persistence.sliceForPersistenceId(pid)
-      PersistenceId.extractEntityType(pid) == entityType && slice >= minSlice && slice <= maxSlice
-    })
+    val prs = storage.tryRead(entityType,
+      repr => {
+        val pid = repr.persistenceId
+        val slice = persistence.sliceForPersistenceId(pid)
+        PersistenceId.extractEntityType(pid) == entityType && slice >= minSlice && slice <= maxSlice
+      })
     Source(prs).map { pr =>
       val slice = persistence.sliceForPersistenceId(pr.persistenceId)
       new typed.EventEnvelope[Event](
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala
index 144728f999..70de0d33d9 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala
@@ -144,10 +144,10 @@ private[testkit] trait ExpectOps[U] {
     val nextInd = nextIndex(persistenceId)
     val expected = Some(event)
     val res = awaitAssert({
-      val actual = getItem(persistenceId, nextInd)
-      assert(actual == expected, s"Failed to persist $event, got $actual instead")
-      actual
-    }, max = max.dilated, interval = pollInterval)
+        val actual = getItem(persistenceId, nextInd)
+        assert(actual == expected, s"Failed to persist $event, got $actual instead")
+        actual
+      }, max = max.dilated, interval = pollInterval)
     setIndex(persistenceId, nextInd + 1)
     res.get.asInstanceOf[A]
@@ -178,12 +178,12 @@ private[testkit] trait ExpectOps[U] {
     val nextInd = nextIndex(persistenceId)
     val c = util.BoxedType(cla)
     val res = awaitAssert({
-      val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny)
-      assert(actual.isDefined, s"Expected: $cla but got no event")
-      val a = actual.get
-      assert(c.isInstance(a), s"Expected: $cla but got unexpected ${a.getClass}")
-      a.asInstanceOf[A]
-    }, max.dilated, interval = pollInterval)
+        val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny)
+        assert(actual.isDefined, s"Expected: $cla but got no event")
+        val a = actual.get
+        assert(c.isInstance(a), s"Expected: $cla but got unexpected ${a.getClass}")
+        a.asInstanceOf[A]
+      }, max.dilated, interval = pollInterval)
     setIndex(persistenceId, nextInd + 1)
     res
   }
@@ -200,10 +200,10 @@ private[testkit] trait ExpectOps[U] {
   def expectNothingPersisted(persistenceId: String, max: FiniteDuration): Unit = {
     val nextInd = nextIndex(persistenceId)
     assertForDuration({
-      val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny)
-      val res = actual.isEmpty
-      assert(res, s"Found persisted event $actual, but expected None instead")
-    }, max = max.dilated, interval = pollInterval)
+        val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny)
+        val res = actual.isEmpty
+        assert(res, s"Found persisted event $actual, but expected None instead")
+      }, max = max.dilated, interval = pollInterval)
   }

   /**
@@ -316,7 +316,7 @@ private[testkit] trait HasStorage[P, R] {

   protected def storage: TestKitStorage[P, R]

-  //todo needs to be thread safe (atomic read-increment-write) for parallel tests. Do we need parallel tests support?
+  // todo needs to be thread safe (atomic read-increment-write) for parallel tests. Do we need parallel tests support?
   @volatile
   private var nextIndexByPersistenceId: immutable.Map[String, Int] = Map.empty
diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala
index 5939f9b1f8..6f92b09590 100644
--- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala
+++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala
@@ -115,11 +115,11 @@ class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem)
     this.synchronized {
       val currentGlobalOffset = lastGlobalOffset.get()
       changes(tag, offset).takeWhile(_.offset match {
-        case Sequence(fromOffset) =>
-          fromOffset < currentGlobalOffset
-        case offset =>
-          throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.")
-      }, inclusive = true)
+          case Sequence(fromOffset) =>
+            fromOffset < currentGlobalOffset
+          case offset =>
+            throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.")
+        }, inclusive = true)
     }

   override def currentChangesBySlices(
@@ -130,11 +130,11 @@ class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem)
     this.synchronized {
       val currentGlobalOffset = lastGlobalOffset.get()
       changesBySlices(entityType, minSlice, maxSlice, offset).takeWhile(_.offset match {
-        case Sequence(fromOffset) =>
-          fromOffset < currentGlobalOffset
-        case offset =>
-          throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.")
-      }, inclusive = true)
+          case Sequence(fromOffset) =>
+            fromOffset < currentGlobalOffset
+          case offset =>
+            throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.")
+        }, inclusive = true)
     }

   override def changesBySlices(
@@ -151,7 +151,8 @@ class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem)
     }

     def bySliceFromOffset(rec: Record[A]) = {
       val slice = persistence.sliceForPersistenceId(rec.persistenceId)
-      PersistenceId.extractEntityType(rec.persistenceId) == entityType && slice >= minSlice && slice <= maxSlice && rec.globalOffset > fromOffset
+      PersistenceId.extractEntityType(
+        rec.persistenceId) == entityType && slice >= minSlice && slice <= maxSlice && rec.globalOffset > fromOffset
     }
     def bySliceFromOffsetNotDeleted(rec: Record[A]) =
       bySliceFromOffset(rec) && storeContains(rec.persistenceId)
diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala
index f7347a8cf8..8e4d28fb7f 100644
--- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala
+++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala
@@ -148,7 +148,7 @@ trait CommonSnapshotTests extends JavaDslUtils {

       val a = system.actorOf(Props(classOf[A], pid, Some(testActor)))

-      //consecutive calls should stack
+      // consecutive calls should stack
       failNextPersisted()
       failNextPersisted()
diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala
index b187d71ea3..87e46527dd 100644
--- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala
+++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala
@@ -104,10 +104,10 @@ trait CommonTestKitTests extends JavaDslUtils {
       override def tryProcess(persistenceId: String, processingUnit: JournalOperation): ProcessingResult = {
         processingUnit match {
           case WriteEvents(msgs) =>
-            val ex = msgs.exists({
+            val ex = msgs.exists {
               case B(666) => true
               case _ => false
-            })
+            }
             if (ex) {
               ProcessingSuccess
             } else {
@@ -145,7 +145,7 @@ trait CommonTestKitTests extends JavaDslUtils {

       val a = system.actorOf(Props(classOf[A], pid, None))

-      //consecutive calls should stack
+      // consecutive calls should stack
       rejectNextPersisted()
       rejectNextPersisted()
@@ -193,7 +193,7 @@ trait CommonTestKitTests extends JavaDslUtils {

       val a = system.actorOf(Props(classOf[A], pid, None))

-      //consecutive calls should stack
+      // consecutive calls should stack
       failNextPersisted()
       failNextPersisted()
diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala
index 552139efa1..26130d30fb 100644
--- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala
+++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala
@@ -155,7 +155,7 @@ trait CommonSnapshotTests extends ScalaDslUtils {

       val a = system.actorOf(Props(classOf[A], pid, Some(testActor)))

-      //consecutive calls should stack
+      // consecutive calls should stack
       failNextPersisted()
       failNextPersisted()
diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala
index fc006a236f..f380fec9de 100644
--- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala
+++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala
@@ -122,10 +122,10 @@ trait CommonTestKitTests extends ScalaDslUtils {
       override def tryProcess(persistenceId: String, processingUnit: JournalOperation): ProcessingResult = {
         processingUnit match {
           case WriteEvents(msgs) =>
-            val ex = msgs.exists({
+            val ex = msgs.exists {
               case B(666) => true
               case _ => false
-            })
+            }
             if (ex) {
               ProcessingSuccess
             } else {
@@ -161,7 +161,7 @@ trait CommonTestKitTests extends ScalaDslUtils {

       val a = system.actorOf(Props(classOf[A], pid, None))

-      //consecutive calls should stack
+      // consecutive calls should stack
       rejectNextPersisted()
       rejectNextPersisted()
@@ -209,7 +209,7 @@ trait CommonTestKitTests extends ScalaDslUtils {

       val a = system.actorOf(Props(classOf[A], pid, None))

-      //consecutive calls should stack
+      // consecutive calls should stack
       failNextPersisted()
       failNextPersisted()
diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala
index a917e4ecbd..d5dfa9b127 100644
--- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala
+++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala
@@ -23,12 +23,13 @@ import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }
 trait ScalaDslUtils extends CommonUtils {

   def eventSourcedBehavior(pid: String, replyOnRecovery: Option[ActorRef[Any]] = None) =
-    EventSourcedBehavior[TestCommand, Evt, EmptyState](PersistenceId.ofUniqueId(pid), EmptyState(), (_, cmd) => {
-      cmd match {
-        case Cmd(data) => Effect.persist(Evt(data))
-        case Passivate => Effect.stop().thenRun(_ => replyOnRecovery.foreach(_ ! Stopped))
-      }
-    }, (_, _) => EmptyState()).snapshotWhen((_, _, _) => true).receiveSignal {
+    EventSourcedBehavior[TestCommand, Evt, EmptyState](PersistenceId.ofUniqueId(pid), EmptyState(),
+      (_, cmd) => {
+        cmd match {
+          case Cmd(data) => Effect.persist(Evt(data))
+          case Passivate => Effect.stop().thenRun(_ => replyOnRecovery.foreach(_ ! Stopped))
+        }
+      }, (_, _) => EmptyState()).snapshotWhen((_, _, _) => true).receiveSignal {
       case (_, RecoveryCompleted) => replyOnRecovery.foreach(_ ! Recovered)
     }
diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala
index fedad68e1d..3f0d3ac7c3 100644
--- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala
+++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala
@@ -91,9 +91,10 @@ abstract class EventSourcedBehaviorLoggingSpec(config: Config)
     s"log internal messages in '$loggerId' logger without logging user data (PersistAll)" in {
       val doneProbe = createTestProbe[Done]()
       LoggingTestKit
-        .debug("Handled command [akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Hellos], " +
-          "resulting effect: [PersistAll(akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event," +
-          "akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event)], side effects: [1]")
+        .debug(
+          "Handled command [akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Hellos], " +
+          "resulting effect: [PersistAll(akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event," +
+          "akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event)], side effects: [1]")
         .withLoggerName(loggerName)
         .expect {
           chattyActor ! Hellos("Mary", "Joe", doneProbe.ref)
diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala
index 44f3206a5c..892622d29e 100644
--- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala
+++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala
@@ -33,27 +33,26 @@ object ReplicatedEventPublishingSpec {
         ReplicatedEventSourcing.commonJournalConfig(
           ReplicationId(EntityType, entityId, replicaId),
           allReplicas,
-          PersistenceTestKitReadJournal.Identifier)(
-          replicationContext =>
-            EventSourcedBehavior[Command, String, Set[String]](
-              replicationContext.persistenceId,
-              Set.empty,
-              (state, command) =>
-                command match {
-                  case Add(string, replyTo) =>
-                    ctx.log.debug("Persisting [{}]", string)
-                    Effect.persist(string).thenRun { _ =>
-                      ctx.log.debug("Ack:ing [{}]", string)
-                      replyTo ! Done
-                    }
-                  case Get(replyTo) =>
-                    replyTo ! state
-                    Effect.none
-                  case Stop =>
-                    Effect.stop()
-                  case unexpected => throw new RuntimeException(s"Unexpected: $unexpected")
-                },
-              (state, string) => state + string))
+          PersistenceTestKitReadJournal.Identifier)(replicationContext =>
+          EventSourcedBehavior[Command, String, Set[String]](
+            replicationContext.persistenceId,
+            Set.empty,
+            (state, command) =>
+              command match {
+                case Add(string, replyTo) =>
+                  ctx.log.debug("Persisting [{}]", string)
+                  Effect.persist(string).thenRun { _ =>
+                    ctx.log.debug("Ack:ing [{}]", string)
+                    replyTo ! Done
+                  }
+                case Get(replyTo) =>
+                  replyTo ! state
+                  Effect.none
+                case Stop =>
+                  Effect.stop()
+                case unexpected => throw new RuntimeException(s"Unexpected: $unexpected")
+              },
+            (state, string) => state + string))
       }
     }
 }
diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala
index bde4dadcf2..401409634f 100644
--- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala
+++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala
@@ -45,29 +45,27 @@ object ReplicatedEventSourcingTaggingSpec {
       ReplicatedEventSourcing.commonJournalConfig(
         ReplicationId("TaggingSpec", entityId, replica),
         allReplicas,
-        queryPluginId)(
-        replicationContext =>
-          EventSourcedBehavior[Command, String, State](
-            replicationContext.persistenceId,
-            State(Set.empty),
-            (state, command) =>
-              command match {
-                case Add(string, ack) =>
-                  if (state.strings.contains(string)) Effect.none.thenRun(_ => ack ! Done)
-                  else Effect.persist(string).thenRun(_ => ack ! Done)
-                case GetStrings(replyTo) =>
-                  replyTo ! state.strings
-                  Effect.none
-              },
-            (state, event) => state.copy(strings = state.strings + event))
+        queryPluginId)(replicationContext =>
+        EventSourcedBehavior[Command, String, State](
+          replicationContext.persistenceId,
+          State(Set.empty),
+          (state, command) =>
+            command match {
+              case Add(string, ack) =>
+                if (state.strings.contains(string)) Effect.none.thenRun(_ => ack ! Done)
+                else Effect.persist(string).thenRun(_ => ack ! Done)
+              case GetStrings(replyTo) =>
+                replyTo ! state.strings
+                Effect.none
+            },
+          (state, event) => state.copy(strings = state.strings + event))
           // use withTagger to define tagging logic
-          .withTagger(
-            event =>
-              // don't apply tags if event was replicated here, it already will appear in queries by tag
-              // as the origin replica would have tagged it already
-              if (replicationContext.origin != replicationContext.replicaId) Set.empty
-              else if (event.length > 10) Set("long-strings", "strings")
-              else Set("strings")))
+          .withTagger(event =>
+            // don't apply tags if event was replicated here, it already will appear in queries by tag
+            // as the origin replica would have tagged it already
+            if (replicationContext.origin != replicationContext.replicaId) Set.empty
+            else if (event.length > 10) Set("long-strings", "strings")
+            else Set("strings")))
       // #tagging
     }
   }
@@ -110,7 +108,7 @@ class ReplicatedEventSourcingTaggingSpec
       stringTaggedEvents.map(_.event).toSet should equal(allEvents)

       val longStrings = query.currentEventsByTag("long-strings", NoOffset).runWith(Sink.seq).futureValue
-      longStrings should have size (1)
+      longStrings should have size 1
     }
   }
diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala
index 305bebe305..38081f7da6 100644
--- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala
+++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala
@@ -31,36 +31,37 @@ object ReplicationIllegalAccessSpec {
         ReplicatedEventSourcing.commonJournalConfig(
           ReplicationId("IllegalAccessSpec", entityId, replica),
           AllReplicas,
-          PersistenceTestKitReadJournal.Identifier)(
-          replicationContext =>
-            EventSourcedBehavior[Command, String, State](
EventSourcedBehavior[Command, String, State]( - replicationContext.persistenceId, - State(Nil), - (_, command) => - command match { - case AccessInCommandHandler(replyTo) => - val exception = try { + PersistenceTestKitReadJournal.Identifier)(replicationContext => + EventSourcedBehavior[Command, String, State]( + replicationContext.persistenceId, + State(Nil), + (_, command) => + command match { + case AccessInCommandHandler(replyTo) => + val exception = + try { replicationContext.origin None } catch { case t: Throwable => Some(t) } - replyTo ! Thrown(exception) - Effect.none - case AccessInPersistCallback(replyTo) => - Effect.persist("cat").thenRun { _ => - val exception = try { + replyTo ! Thrown(exception) + Effect.none + case AccessInPersistCallback(replyTo) => + Effect.persist("cat").thenRun { _ => + val exception = + try { replicationContext.concurrent None } catch { case t: Throwable => Some(t) } - replyTo ! Thrown(exception) - } - }, - (state, event) => state.copy(all = event :: state.all))) + replyTo ! Thrown(exception) + } + }, + (state, event) => state.copy(all = event :: state.all))) } } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala index 75f4f4a68c..309fbaf379 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala @@ -123,10 +123,11 @@ class EventSourcedBehaviorFailureSpec LoggingTestKit.error[JournalFailureException].expect { val probe = TestProbe[String]() val excProbe = TestProbe[Throwable]() - spawn(failingPersistentActor(PersistenceId.ofUniqueId("fail-recovery"), probe.ref, { - case (_, RecoveryFailed(t)) => - excProbe.ref ! t - })) + spawn(failingPersistentActor(PersistenceId.ofUniqueId("fail-recovery"), probe.ref, + { + case (_, RecoveryFailed(t)) => + excProbe.ref ! t + })) excProbe.expectMessageType[TestException].message shouldEqual "Nope" probe.expectMessage("stopped") @@ -135,10 +136,11 @@ class EventSourcedBehaviorFailureSpec "handle exceptions from RecoveryFailed signal handler" in { val probe = TestProbe[String]() - val pa = spawn(failingPersistentActor(PersistenceId.ofUniqueId("fail-recovery-twice"), probe.ref, { - case (_, RecoveryFailed(_)) => - throw TestException("recovery call back failure") - })) + val pa = spawn(failingPersistentActor(PersistenceId.ofUniqueId("fail-recovery-twice"), probe.ref, + { + case (_, RecoveryFailed(_)) => + throw TestException("recovery call back failure") + })) pa ! "one" probe.expectMessage("starting") probe.expectMessage("persisting") @@ -160,10 +162,11 @@ class EventSourcedBehaviorFailureSpec LoggingTestKit.error[JournalFailureException].expect { // start again and then the event handler will throw - spawn(failingPersistentActor(pid, probe.ref, { - case (_, RecoveryFailed(t)) => - excProbe.ref ! t - })) + spawn(failingPersistentActor(pid, probe.ref, + { + case (_, RecoveryFailed(t)) => + excProbe.ref ! 
t + })) excProbe.expectMessageType[TestException].message shouldEqual "wrong event" probe.expectMessage("stopped") @@ -177,7 +180,8 @@ class EventSourcedBehaviorFailureSpec Behaviors .supervise(failingPersistentActor( PersistenceId.ofUniqueId("recovery-ok"), - probe.ref, { + probe.ref, + { case (_, RecoveryCompleted) => probe.ref.tell("starting") throw TestException("recovery call back failure") @@ -292,9 +296,10 @@ class EventSourcedBehaviorFailureSpec case object SomeSignal extends Signal LoggingTestKit.error[TestException].expect { val probe = TestProbe[String]() - val behav = failingPersistentActor(PersistenceId.ofUniqueId("wrong-signal-handler"), probe.ref, { - case (_, SomeSignal) => throw TestException("from signal") - }) + val behav = failingPersistentActor(PersistenceId.ofUniqueId("wrong-signal-handler"), probe.ref, + { + case (_, SomeSignal) => throw TestException("from signal") + }) val c = spawn(behav) probe.expectMessage("starting") c.toClassic ! SomeSignal diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala index f999c842c3..9a9a6d605d 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala @@ -715,7 +715,7 @@ class EventSourcedBehaviorSpec firstThree.size shouldBe 3 val others = queries.currentPersistenceIds(Some(firstThree.last), Long.MaxValue).runWith(Sink.seq).futureValue - firstThree ++ others should contain theSameElementsInOrderAs (all) + firstThree ++ others should contain theSameElementsInOrderAs all } def watcher(toWatch: ActorRef[_]): TestProbe[String] = { diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala index 277c5c9643..199173a4d5 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala @@ -556,7 +556,8 @@ class EventSourcedBehaviorStashSpec Effect.stash() } } - }, { + }, + { case (_, "start-stashing") => true case (_, "unstash") => false case (_, _) => throw new IllegalArgumentException() diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala index 4f54fc9a5e..e8743ecc4e 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala @@ -89,10 +89,11 @@ class EventSourcedBehaviorWatchSpec context.watch(child) - EventSourcedBehavior[Command, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt) + EventSourcedBehavior[Command, String, String](nextPid, emptyState = "", + commandHandler = (_, cmd) => { + child ! 
cmd + Effect.none + }, eventHandler = (state, evt) => state + evt) }) LoggingTestKit.error[TestException].expect { @@ -127,10 +128,11 @@ class EventSourcedBehaviorWatchSpec context.watch(child) - EventSourcedBehavior[Command, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt).receiveSignal(signalHandler) + EventSourcedBehavior[Command, String, String](nextPid, emptyState = "", + commandHandler = (_, cmd) => { + child ! cmd + Effect.none + }, eventHandler = (state, evt) => state + evt).receiveSignal(signalHandler) }) LoggingTestKit.error[TestException].expect { @@ -156,10 +158,11 @@ class EventSourcedBehaviorWatchSpec probe.ref ! child context.watch(child) - EventSourcedBehavior[Stop.type, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt).receiveSignal { + EventSourcedBehavior[Stop.type, String, String](nextPid, emptyState = "", + commandHandler = (_, cmd) => { + child ! cmd + Effect.none + }, eventHandler = (state, evt) => state + evt).receiveSignal { case (_, t: Terminated) => probe.ref ! HasTerminated(t.ref) Behaviors.stopped @@ -184,10 +187,11 @@ class EventSourcedBehaviorWatchSpec probe.ref ! child context.watch(child) - EventSourcedBehavior[Fail.type, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt).receiveSignal { + EventSourcedBehavior[Fail.type, String, String](nextPid, emptyState = "", + commandHandler = (_, cmd) => { + child ! cmd + Effect.none + }, eventHandler = (state, evt) => state + evt).receiveSignal { case (_, t: ChildFailed) => probe.ref ! ChildHasFailed(t) Behaviors.same diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala index e438c3dec5..da635d8153 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala @@ -90,11 +90,13 @@ class EventSourcedEventAdapterSpec PersistenceQuery(system).readJournalFor[PersistenceTestKitReadJournal](PersistenceTestKitReadJournal.Identifier) private def behavior(pid: PersistenceId, probe: ActorRef[String]): EventSourcedBehavior[String, String, String] = - EventSourcedBehavior(pid, "", commandHandler = { (_, command) => - Effect.persist(command).thenRun(newState => probe ! newState) - }, eventHandler = { (state, evt) => - state + evt - }) + EventSourcedBehavior(pid, "", + commandHandler = { (_, command) => + Effect.persist(command).thenRun(newState => probe ! 
newState) + }, + eventHandler = { (state, evt) => + state + evt + }) "Event adapter" must { diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala index f61bd569f8..6b14f00350 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala @@ -32,9 +32,9 @@ class EventSourcedSequenceNumberSpec with LogCapturing { private def behavior(pid: PersistenceId, probe: ActorRef[String]): Behavior[String] = - Behaviors.setup( - ctx => - EventSourcedBehavior[String, String, String](pid, "", { + Behaviors.setup(ctx => + EventSourcedBehavior[String, String, String](pid, "", + { (state, command) => state match { case "stashing" => @@ -64,13 +64,14 @@ class EventSourcedSequenceNumberSpec Effect.persist("snapshot") } } - }, { (_, evt) => + }, + { (_, evt) => probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} eventHandler $evt" evt }).snapshotWhen((_, event, _) => event == "snapshot").receiveSignal { - case (_, RecoveryCompleted) => - probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" - }) + case (_, RecoveryCompleted) => + probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" + }) "The sequence number" must { diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala index c45fc60283..8f72ce1574 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala @@ -27,12 +27,14 @@ object EventSourcedStashOverflowSpec { def apply(persistenceId: PersistenceId): Behavior[Command] = EventSourcedBehavior[Command, String, List[String]]( persistenceId, - Nil, { (_, command) => + Nil, + { (_, command) => command match { case DoNothing(replyTo) => Effect.persist(List.empty[String]).thenRun(_ => replyTo ! 
Done) } - }, { (state, event) => + }, + { (state, event) => // original reproducer slept 2 seconds here but a pure application of an event seems unlikely to take that long // so instead we delay recovery using a special journal event :: state diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala index 08ce68822d..cd1aa2d77a 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala @@ -29,13 +29,15 @@ class LoggerSourceSpec def behavior: Behavior[String] = Behaviors.setup { ctx => ctx.log.info("setting-up-behavior") - EventSourcedBehavior[String, String, String](nextPid(), emptyState = "", commandHandler = (_, _) => { - ctx.log.info("command-received") - Effect.persist("evt") - }, eventHandler = (state, _) => { - ctx.log.info("event-received") - state - }).receiveSignal { + EventSourcedBehavior[String, String, String](nextPid(), emptyState = "", + commandHandler = (_, _) => { + ctx.log.info("command-received") + Effect.persist("evt") + }, + eventHandler = (state, _) => { + ctx.log.info("event-received") + state + }).receiveSignal { case (_, RecoveryCompleted) => ctx.log.info("recovery-completed") case (_, SnapshotCompleted(_)) => case (_, SnapshotFailed(_, _)) => @@ -92,10 +94,11 @@ class LoggerSourceSpec val behavior: Behavior[String] = Behaviors.setup[String] { ctx => ctx.setLoggerName("my-custom-name") - EventSourcedBehavior[String, String, String](nextPid(), emptyState = "", commandHandler = (_, _) => { - ctx.log.info("command-received") - Effect.persist("evt") - }, eventHandler = (state, _) => state) + EventSourcedBehavior[String, String, String](nextPid(), emptyState = "", + commandHandler = (_, _) => { + ctx.log.info("command-received") + Effect.persist("evt") + }, eventHandler = (state, _) => state) } val actor = spawn(behavior) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala index adc2a58dc4..18937208df 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala @@ -72,7 +72,7 @@ object PerformanceSpec { def behavior(name: String, probe: TestProbe[Reply])(other: (Command, Parameters) => Effect[String, String]) = { Behaviors - .supervise({ + .supervise { val parameters = Parameters() EventSourcedBehavior[Command, String, String]( persistenceId = PersistenceId.ofUniqueId(name), @@ -90,7 +90,7 @@ object PerformanceSpec { case (_, RecoveryCompleted) => if (parameters.every(1000)) print("r") } - }) + } .onFailure(SupervisorStrategy.restart.withLoggingEnabled(false)) } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala index 3a65f242da..28a0cdccdd 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala +++ 
b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala @@ -32,38 +32,37 @@ class DurableStateRevisionSpec with LogCapturing { private def behavior(pid: PersistenceId, probe: ActorRef[String]): Behavior[String] = - Behaviors.setup( - ctx => - DurableStateBehavior[String, String]( - pid, - "", - (state, command) => - state match { - case "stashing" => - command match { - case "unstash" => - probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} unstash" - Effect.persist("normal").thenUnstashAll() - case _ => - Effect.stash() - } - case _ => - command match { - case "cmd" => - probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} onCommand" - Effect - .persist("state") - .thenRun(_ => probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} thenRun") - case "stash" => - probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} stash" - Effect.persist("stashing") - case "snapshot" => - Effect.persist("snapshot") - } - }).receiveSignal { - case (_, RecoveryCompleted) => - probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" - }) + Behaviors.setup(ctx => + DurableStateBehavior[String, String]( + pid, + "", + (state, command) => + state match { + case "stashing" => + command match { + case "unstash" => + probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} unstash" + Effect.persist("normal").thenUnstashAll() + case _ => + Effect.stash() + } + case _ => + command match { + case "cmd" => + probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} onCommand" + Effect + .persist("state") + .thenRun(_ => probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} thenRun") + case "stash" => + probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} stash" + Effect.persist("stashing") + case "snapshot" => + Effect.persist("snapshot") + } + }).receiveSignal { + case (_, RecoveryCompleted) => + probe ! 
s"${DurableStateBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" + }) "The revision number" must { diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala index 22b2ae8c8e..651124a5b3 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala @@ -32,12 +32,12 @@ import akka.serialization.jackson.CborSerializable object ReplicatedAuctionExampleSpec { - //#setup + // #setup object AuctionEntity { - //#setup + // #setup - //#commands + // #commands type MoneyAmount = Int case class Bid(bidder: String, offer: MoneyAmount, timestamp: Instant, originReplica: ReplicaId) @@ -48,17 +48,17 @@ object ReplicatedAuctionExampleSpec { final case class GetHighestBid(replyTo: ActorRef[Bid]) extends Command final case class IsClosed(replyTo: ActorRef[Boolean]) extends Command private case object Close extends Command // Internal, should not be sent from the outside - //#commands + // #commands - //#events + // #events sealed trait Event extends CborSerializable final case class BidRegistered(bid: Bid) extends Event final case class AuctionFinished(atReplica: ReplicaId) extends Event final case class WinnerDecided(atReplica: ReplicaId, winningBid: Bid, highestCounterOffer: MoneyAmount) extends Event - //#events + // #events - //#phase + // #phase /** * The auction passes through several workflow phases. * First, in `Running` `OfferBid` commands are accepted. @@ -78,15 +78,14 @@ object ReplicatedAuctionExampleSpec { * When the responsible DC has seen all `AuctionFinished` events from other DCs * all other events have also been propagated and it can persist `WinnerDecided` and * the auction is finally `Closed`. 
- * */ sealed trait AuctionPhase case object Running extends AuctionPhase final case class Closing(finishedAtReplica: Set[ReplicaId]) extends AuctionPhase case object Closed extends AuctionPhase - //#phase + // #phase - //#state + // #state case class AuctionState(phase: AuctionPhase, highestBid: Bid, highestCounterOffer: MoneyAmount) extends CborSerializable { @@ -132,9 +131,9 @@ object ReplicatedAuctionExampleSpec { (first.offer == second.offer && first.timestamp.equals(second.timestamp) && first.originReplica.id .compareTo(second.originReplica.id) < 0) } - //#state + // #state - //#setup + // #setup def apply( replica: ReplicaId, name: String, @@ -180,9 +179,9 @@ object ReplicatedAuctionExampleSpec { val millisUntilClosing = closingAt.toEpochMilli - replicationContext.currentTimeMillis() timers.startSingleTimer(Finish, millisUntilClosing.millis) } - //#setup + // #setup - //#command-handler + // #command-handler def commandHandler(state: AuctionState, command: Command): Effect[Event, AuctionState] = { state.phase match { case Closing(_) | Closed => @@ -230,9 +229,9 @@ object ReplicatedAuctionExampleSpec { } } } - //#command-handler + // #command-handler - //#event-handler + // #event-handler def eventHandler(state: AuctionState, event: Event): AuctionState = { val newState = state.applyEvent(event) @@ -244,9 +243,9 @@ object ReplicatedAuctionExampleSpec { } - //#event-handler + // #event-handler - //#event-triggers + // #event-triggers private def eventTriggers(event: Event, newState: AuctionState): Unit = { event match { case finished: AuctionFinished => @@ -284,11 +283,11 @@ object ReplicatedAuctionExampleSpec { false }) } - //#event-triggers + // #event-triggers - //#setup + // #setup } - //#setup + // #setup } class ReplicatedAuctionExampleSpec diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala index 4978504247..61b1e76ced 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala @@ -65,7 +65,7 @@ object ReplicatedBlogExampleSpec { } } - //#command-handler + // #command-handler private def commandHandler( ctx: ActorContext[Command], replicationContext: ReplicationContext, @@ -100,9 +100,9 @@ object ReplicatedBlogExampleSpec { Effect.none } } - //#command-handler + // #command-handler - //#event-handler + // #event-handler private def eventHandler( ctx: ActorContext[Command], replicationContext: ReplicationContext, @@ -127,7 +127,7 @@ object ReplicatedBlogExampleSpec { state.copy(published = true) } } - //#event-handler + // #event-handler } } diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala index f16a877c1f..e0af599547 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala @@ -15,11 +15,11 @@ import akka.persistence.typed.scaladsl.ReplicatedEventSourcing @nowarn("msg=never used") object ReplicatedEventSourcingCompileOnlySpec { - //#replicas + // #replicas 
val DCA = ReplicaId("DC-A") val DCB = ReplicaId("DC-B") val AllReplicas = Set(DCA, DCB) - //#replicas + // #replicas val queryPluginId = "" @@ -28,7 +28,7 @@ object ReplicatedEventSourcingCompileOnlySpec { trait Event object Shared { - //#factory-shared + // #factory-shared def apply( system: ActorSystem[_], entityId: String, @@ -40,11 +40,11 @@ object ReplicatedEventSourcingCompileOnlySpec { EventSourcedBehavior[Command, State, Event](???, ???, ???, ???) } } - //#factory-shared + // #factory-shared } object PerReplica { - //#factory + // #factory def apply( system: ActorSystem[_], entityId: String, @@ -56,7 +56,7 @@ object ReplicatedEventSourcingCompileOnlySpec { } } - //#factory + // #factory } } diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala index d9d4d7ea29..b3c58936c4 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala @@ -19,7 +19,7 @@ import akka.persistence.typed.scaladsl.EventSourcedBehavior import akka.persistence.typed.scaladsl.ReplicatedEventSourcing object ReplicatedMovieWatchListExampleSpec { - //#movie-entity + // #movie-entity object MovieWatchList { sealed trait Command final case class AddMovie(movieId: String) extends Command @@ -57,7 +57,7 @@ object ReplicatedMovieWatchListExampleSpec { } } - //#movie-entity + // #movie-entity } diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala index c5bb19f621..728183f064 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala @@ -24,7 +24,7 @@ import akka.serialization.jackson.CborSerializable object ReplicatedShoppingCartExampleSpec { - //#shopping-cart + // #shopping-cart object ShoppingCart { type ProductId = String @@ -79,7 +79,7 @@ object ReplicatedShoppingCartExampleSpec { } } } - //#shopping-cart + // #shopping-cart } class ReplicatedShoppingCartExampleSpec diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala index 29391cd8f2..ba1578ae98 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala @@ -420,7 +420,8 @@ final class ORSet[A] private[akka] ( val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that) val entries0 = if (addDeltaOp) - entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } else { + entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } + else { val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains) ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00) } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala 
b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala index d475a6b3a8..896c2a88c1 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala @@ -71,9 +71,9 @@ private[akka] final class BehaviorSetup[C, E, S]( Persistence(context.system.classicSystem).configFor(snapshotStore).getBoolean("snapshot-is-optional") if (isSnapshotOptional && (retention match { - case SnapshotCountRetentionCriteriaImpl(_, _, true) => true - case _ => false - })) { + case SnapshotCountRetentionCriteriaImpl(_, _, true) => true + case _ => false + })) { throw new IllegalArgumentException( "Retention criteria with delete events can't be used together with snapshot-is-optional=true. " + "That can result in wrong recovered state if snapshot load fails.") diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala index cd35c6e15f..fbd173837e 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala @@ -64,9 +64,9 @@ private[akka] trait JournalInteractions[C, E, S] { onWriteInitiated(ctx, cmd, repr) val write = AtomicWrite(metadata match { - case OptionVal.Some(meta) => repr.withMetadata(meta) - case _ => repr - }) :: Nil + case OptionVal.Some(meta) => repr.withMetadata(meta) + case _ => repr + }) :: Nil setup.journal .tell(JournalProtocol.WriteMessages(write, setup.selfClassic, setup.writerIdentity.instanceId), setup.selfClassic) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala index 84c4ffe2a2..225de2fdb7 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala @@ -31,7 +31,8 @@ import akka.util.unused import scala.collection.immutable -/*** +/** + * + * * INTERNAL API * * Third (of four) behavior of an EventSourcedBehavior.
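The BehaviorSetup hunk above touches a start-up guard: retention criteria that delete events on snapshot are rejected whenever the snapshot plugin is configured with snapshot-is-optional=true, because a tolerated snapshot-load failure would then have to replay events that were already deleted. A minimal sketch of a behavior that trips this guard, assuming the akka-persistence-typed scaladsl API (the Counter names and the 100/2 retention figures are illustrative, not taken from this patch):

import akka.actor.typed.Behavior
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior, RetentionCriteria }

object Counter {
  sealed trait Command
  case object Increment extends Command

  final case class Incremented(delta: Int)

  def apply(id: String): Behavior[Command] =
    EventSourcedBehavior[Command, Incremented, Int](
      persistenceId = PersistenceId.ofUniqueId(id),
      emptyState = 0,
      commandHandler = (_, _) => Effect.persist(Incremented(1)),
      eventHandler = (state, event) => state + event.delta)
      // snapshot every 100 events, keep 2 snapshots, and delete the events covered
      // by the oldest kept snapshot; combined with snapshot-is-optional=true on the
      // snapshot plugin, this is the combination the guard above rejects at start-up
      .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 100, keepNSnapshots = 2).withDeleteEventsOnSnapshot)
}

With snapshot-is-optional=false the same behavior starts normally, since a failed snapshot load then fails recovery instead of silently replaying from an incomplete event stream.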
diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala index 16cd976784..7d39bf0a30 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala @@ -168,35 +168,37 @@ private[akka] object Running { meta.version) ReplicatedEventEnvelope(re, replyTo) } - .recoverWithRetries(1, { - // not a failure, the replica is stopping, complete the stream - case _: WatchedActorTerminatedException => - Source.empty - })) + .recoverWithRetries(1, + { + // not a failure, the replica is stopping, complete the stream + case _: WatchedActorTerminatedException => + Source.empty + })) source.runWith(Sink.ignore)(SystemMaterializer(system).materializer) // TODO support from journal to fast forward https://github.com/akka/akka/issues/29311 state.copy( replicationControl = - state.replicationControl.updated(replicaId, new ReplicationStreamControl { - override def fastForward(sequenceNumber: Long): Unit = { - // (logging is safe here since invoked on message receive - OptionVal(controlRef.get) match { - case OptionVal.Some(control) => - if (setup.internalLogger.isDebugEnabled) - setup.internalLogger.debug("Fast forward replica [{}] to [{}]", replicaId, sequenceNumber) - control.fastForward(sequenceNumber) - case _ => - // stream not started yet, ok, fast forward is an optimization - if (setup.internalLogger.isDebugEnabled) - setup.internalLogger.debug( - "Ignoring fast forward replica [{}] to [{}], stream not started yet", - replicaId, - sequenceNumber) + state.replicationControl.updated(replicaId, + new ReplicationStreamControl { + override def fastForward(sequenceNumber: Long): Unit = { + // (logging is safe here since invoked on message receive) + OptionVal(controlRef.get) match { + case OptionVal.Some(control) => + if (setup.internalLogger.isDebugEnabled) + setup.internalLogger.debug("Fast forward replica [{}] to [{}]", replicaId, sequenceNumber) + control.fastForward(sequenceNumber) + case _ => + // stream not started yet, ok, fast forward is an optimization + if (setup.internalLogger.isDebugEnabled) + setup.internalLogger.debug( + "Ignoring fast forward replica [{}] to [{}], stream not started yet", + replicaId, + sequenceNumber) + } } - } - })) + })) } else { state } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala index 6a36faeba6..4b42d8c087 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala @@ -5,7 +5,7 @@ package akka.persistence.typed.javadsl import java.util.Objects -import java.util.function.{ BiFunction, Predicate, Supplier, Function => JFunction } +import java.util.function.{ BiFunction, Function => JFunction, Predicate, Supplier } import scala.compat.java8.FunctionConverters._ @@ -198,12 +198,12 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, Effect[Event, State]]): Unit = { cases = CommandHandlerCase[Command, Event, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S])
- else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, Effect[Event, State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, Effect[Event, State]]]) :: cases } /** @@ -233,9 +233,10 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int def onCommand( predicate: Predicate[Command], handler: JFunction[Command, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, Effect[Event, State]] { - override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) - }) + addCase(cmd => predicate.test(cmd), + new BiFunction[S, Command, Effect[Event, State]] { + override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) + }) this } @@ -268,9 +269,10 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int def onCommand[C <: Command]( commandClass: Class[C], handler: JFunction[C, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[Event, State]] { - override def apply(state: S, cmd: C): Effect[Event, State] = handler(cmd) - }) + onCommand[C](commandClass, + new BiFunction[S, C, Effect[Event, State]] { + override def apply(state: S, cmd: C): Effect[Event, State] = handler(cmd) + }) } /** @@ -285,9 +287,10 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int def onCommand[C <: Command]( commandClass: Class[C], handler: Supplier[Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[Event, State]] { - override def apply(state: S, cmd: C): Effect[Event, State] = handler.get() - }) + onCommand[C](commandClass, + new BiFunction[S, C, Effect[Event, State]] { + override def apply(state: S, cmd: C): Effect[Event, State] = handler.get() + }) } /** @@ -328,9 +331,10 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * @return A CommandHandler from the appended states. */ def onAnyCommand(handler: JFunction[Command, Effect[Event, State]]): CommandHandler[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[Event, State]] { - override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) - }) + addCase(_ => true, + new BiFunction[S, Command, Effect[Event, State]] { + override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) + }) build() } @@ -352,9 +356,10 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * @return A CommandHandler from the appended states. 
*/ def onAnyCommand(handler: Supplier[Effect[Event, State]]): CommandHandler[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[Event, State]] { - override def apply(state: S, cmd: Command): Effect[Event, State] = handler.get() - }) + addCase(_ => true, + new BiFunction[S, Command, Effect[Event, State]] { + override def apply(state: S, cmd: Command): Effect[Event, State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala index 136589ec8f..a1e9270c8e 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala @@ -5,7 +5,7 @@ package akka.persistence.typed.javadsl import java.util.Objects -import java.util.function.{ BiFunction, Predicate, Supplier, Function => JFunction } +import java.util.function.{ BiFunction, Function => JFunction, Predicate, Supplier } import scala.compat.java8.FunctionConverters._ @@ -210,12 +210,12 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St predicate: Command => Boolean, handler: BiFunction[S, Command, ReplyEffect[Event, State]]): Unit = { cases = CommandHandlerCase[Command, Event, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[Event, State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[Event, State]]]) :: cases } /** @@ -243,9 +243,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St */ def onCommand(predicate: Predicate[Command], handler: JFunction[Command, ReplyEffect[Event, State]]) : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) - }) + addCase(cmd => predicate.test(cmd), + new BiFunction[S, Command, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) + }) this } @@ -276,9 +277,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St */ def onCommand[C <: Command](commandClass: Class[C], handler: JFunction[C, ReplyEffect[Event, State]]) : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler(cmd) - }) + onCommand[C](commandClass, + new BiFunction[S, C, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler(cmd) + }) } /** @@ -293,9 +295,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St def onCommand[C <: Command]( commandClass: Class[C], handler: Supplier[ReplyEffect[Event, State]]): 
CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler.get() - }) + onCommand[C](commandClass, + new BiFunction[S, C, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler.get() + }) } /** @@ -338,9 +341,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St */ def onAnyCommand( handler: JFunction[Command, ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) - }) + addCase(_ => true, + new BiFunction[S, Command, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) + }) build() } @@ -362,9 +366,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * @return A CommandHandlerWithReply from the appended states. */ def onAnyCommand(handler: Supplier[ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler.get() - }) + addCase(_ => true, + new BiFunction[S, Command, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala index a1120918ae..f5f97c54d6 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala @@ -91,9 +91,10 @@ import akka.util.ccompat.JavaConverters._ * finding mistakes. */ def reply[ReplyMessage](replyTo: ActorRef[ReplyMessage], replyWithMessage: ReplyMessage): ReplyEffect[Event, State] = - none().thenReply[ReplyMessage](replyTo, new function.Function[State, ReplyMessage] { - override def apply(param: State): ReplyMessage = replyWithMessage - }) + none().thenReply[ReplyMessage](replyTo, + new function.Function[State, ReplyMessage] { + override def apply(param: State): ReplyMessage = replyWithMessage + }) /** * When [[EventSourcedBehaviorWithEnforcedReplies]] is used there will be compilation errors if the returned effect @@ -134,7 +135,6 @@ import akka.util.ccompat.JavaConverters._ * but if a known subtype of `State` is expected that can be specified instead (preferably by * explicitly typing the lambda parameter like so: `thenRun((SubState state) -> { ... })`). * If the state is not of the expected type an [[java.lang.ClassCastException]] is thrown. 
- * */ final def thenRun[NewState <: State](callback: function.Procedure[NewState]): EffectBuilder[Event, State] = CompositeEffect(this, SideEffect[State](s => callback.apply(s.asInstanceOf[NewState]))) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala index a03c052c79..fa658fea2b 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala @@ -5,7 +5,7 @@ package akka.persistence.typed.javadsl import java.util.Objects -import java.util.function.{ BiFunction, Predicate, Supplier, Function => JFunction } +import java.util.function.{ BiFunction, Function => JFunction, Predicate, Supplier } import scala.compat.java8.FunctionConverters._ @@ -197,11 +197,11 @@ final class EventHandlerBuilderByState[S <: State, State, Event]( private def addCase(eventPredicate: Event => Boolean, handler: BiFunction[State, Event, State]): Unit = { cases = EventHandlerCase[State, Event]( - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - eventPredicate = eventPredicate, - handler) :: cases + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + eventPredicate = eventPredicate, + handler) :: cases } /** @@ -231,9 +231,10 @@ final class EventHandlerBuilderByState[S <: State, State, Event]( def onEvent[E <: Event]( eventClass: Class[E], handler: JFunction[E, State]): EventHandlerBuilderByState[S, State, Event] = { - onEvent[E](eventClass, new BiFunction[S, E, State] { - override def apply(state: S, event: E): State = handler(event) - }) + onEvent[E](eventClass, + new BiFunction[S, E, State] { + override def apply(state: S, event: E): State = handler(event) + }) } /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala index 0b79c0c620..b822668b49 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala @@ -35,7 +35,7 @@ abstract class ReplicatedEventSourcedBehavior[Command, Event, State]( */ @InternalApi override def apply(context: TypedActorContext[Command]): Behavior[Command] = { createEventSourcedBehavior() - // context not user extendable so there should never be any other impls + // context not user extendable so there should never be any other impls .withReplication(replicationContext.asInstanceOf[ReplicationContextImpl]) .withEventPublishing(withEventPublishing) } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala index 9107cc43d8..5c23f40d4a 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala +++ 
b/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala @@ -340,10 +340,10 @@ import scala.collection.immutable.TreeMap def orsetFromProto(orset: ReplicatedEventSourcing.ORSet): ORSet[Any] = { val elements: Iterator[Any] = - (orset.getStringElementsList.iterator.asScala ++ + orset.getStringElementsList.iterator.asScala ++ orset.getIntElementsList.iterator.asScala ++ orset.getLongElementsList.iterator.asScala ++ - orset.getOtherElementsList.iterator.asScala.map(wrappedSupport.deserializePayload)) + orset.getOtherElementsList.iterator.asScala.map(wrappedSupport.deserializePayload) val dots = orset.getDotsList.asScala.map(versionVectorFromProto).iterator val elementsMap = elements.zip(dots).toMap @@ -371,7 +371,7 @@ import scala.collection.immutable.TreeMap VersionVector(entries.get(0).getKey, entries.get(0).getVersion) else { val versions = TreeMap.empty[String, Long] ++ versionVector.getEntriesList.asScala.map(entry => - entry.getKey -> entry.getVersion) + entry.getKey -> entry.getVersion) VersionVector(versions) } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala index 1b5b6dc04f..000a97cb10 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala @@ -199,12 +199,12 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, Effect[State]]): Unit = { cases = CommandHandlerCase[Command, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, Effect[State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, Effect[State]]]) :: cases } /** @@ -234,9 +234,10 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp def onCommand( predicate: Predicate[Command], handler: JFunction[Command, Effect[State]]): CommandHandlerBuilderByState[Command, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, Effect[State]] { - override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) - }) + addCase(cmd => predicate.test(cmd), + new BiFunction[S, Command, Effect[State]] { + override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) + }) this } @@ -269,9 +270,10 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp def onCommand[C <: Command]( commandClass: Class[C], handler: JFunction[C, Effect[State]]): CommandHandlerBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[State]] { - override def apply(state: S, cmd: C): Effect[State] = handler(cmd) - }) + onCommand[C](commandClass, + new BiFunction[S, C, Effect[State]] { + override def apply(state: S, cmd: C): Effect[State] = handler(cmd) + }) } /** @@ -286,9 +288,10 @@ final class 
CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp def onCommand[C <: Command]( commandClass: Class[C], handler: Supplier[Effect[State]]): CommandHandlerBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[State]] { - override def apply(state: S, cmd: C): Effect[State] = handler.get() - }) + onCommand[C](commandClass, + new BiFunction[S, C, Effect[State]] { + override def apply(state: S, cmd: C): Effect[State] = handler.get() + }) } /** @@ -329,9 +332,10 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp * @return A CommandHandler from the appended states. */ def onAnyCommand(handler: JFunction[Command, Effect[State]]): CommandHandler[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[State]] { - override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) - }) + addCase(_ => true, + new BiFunction[S, Command, Effect[State]] { + override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) + }) build() } @@ -353,9 +357,10 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp * @return A CommandHandler from the appended states. */ def onAnyCommand(handler: Supplier[Effect[State]]): CommandHandler[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[State]] { - override def apply(state: S, cmd: Command): Effect[State] = handler.get() - }) + addCase(_ => true, + new BiFunction[S, Command, Effect[State]] { + override def apply(state: S, cmd: Command): Effect[State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala index 7ab3972f40..48c7d7952e 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala @@ -209,12 +209,12 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, ReplyEffect[State]]): Unit = { cases = CommandHandlerCase[Command, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[State]]]) :: cases } /** @@ -244,9 +244,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I def onCommand( predicate: Predicate[Command], handler: JFunction[Command, ReplyEffect[State]]): CommandHandlerWithReplyBuilderByState[Command, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, ReplyEffect[State]] { - override def apply(state: S, cmd: Command): ReplyEffect[State] = handler(cmd) - }) + addCase(cmd => predicate.test(cmd), + new BiFunction[S, Command, ReplyEffect[State]] { + override def apply(state: S, cmd: Command): ReplyEffect[State] = 
handler(cmd) + }) this } @@ -279,9 +280,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I def onCommand[C <: Command]( commandClass: Class[C], handler: JFunction[C, ReplyEffect[State]]): CommandHandlerWithReplyBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[State]] { - override def apply(state: S, cmd: C): ReplyEffect[State] = handler(cmd) - }) + onCommand[C](commandClass, + new BiFunction[S, C, ReplyEffect[State]] { + override def apply(state: S, cmd: C): ReplyEffect[State] = handler(cmd) + }) } /** @@ -296,9 +298,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I def onCommand[C <: Command]( commandClass: Class[C], handler: Supplier[ReplyEffect[State]]): CommandHandlerWithReplyBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[State]] { - override def apply(state: S, cmd: C): ReplyEffect[State] = handler.get() - }) + onCommand[C](commandClass, + new BiFunction[S, C, ReplyEffect[State]] { + override def apply(state: S, cmd: C): ReplyEffect[State] = handler.get() + }) } /** @@ -339,9 +342,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I * @return A CommandHandlerWithReply from the appended states. */ def onAnyCommand(handler: JFunction[Command, ReplyEffect[State]]): CommandHandlerWithReply[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[State]] { - override def apply(state: S, cmd: Command): ReplyEffect[State] = handler(cmd) - }) + addCase(_ => true, + new BiFunction[S, Command, ReplyEffect[State]] { + override def apply(state: S, cmd: Command): ReplyEffect[State] = handler(cmd) + }) build() } @@ -363,9 +367,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I * @return A CommandHandlerWithReply from the appended states. */ def onAnyCommand(handler: Supplier[ReplyEffect[State]]): CommandHandlerWithReply[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[State]] { - override def apply(state: S, cmd: Command): ReplyEffect[State] = handler.get() - }) + addCase(_ => true, + new BiFunction[S, Command, ReplyEffect[State]] { + override def apply(state: S, cmd: Command): ReplyEffect[State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala index 88fcff0dc6..92cf7b37e0 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala @@ -96,9 +96,10 @@ import akka.persistence.typed.state.internal._ * finding mistakes. 
*/ def reply[ReplyMessage](replyTo: ActorRef[ReplyMessage], replyWithMessage: ReplyMessage): ReplyEffect[State] = - none().thenReply[ReplyMessage](replyTo, new function.Function[State, ReplyMessage] { - override def apply(param: State): ReplyMessage = replyWithMessage - }) + none().thenReply[ReplyMessage](replyTo, + new function.Function[State, ReplyMessage] { + override def apply(param: State): ReplyMessage = replyWithMessage + }) /** * When [[DurableStateBehaviorWithEnforcedReplies]] is used there will be compilation errors if the returned effect @@ -139,7 +140,6 @@ import akka.persistence.typed.state.internal._ * but if a known subtype of `State` is expected that can be specified instead (preferably by * explicitly typing the lambda parameter like so: `thenRun((SubState state) -> { ... })`). * If the state is not of the expected type an [[java.lang.ClassCastException]] is thrown. - * */ final def thenRun[NewState <: State](callback: function.Procedure[NewState]): EffectBuilder[State] = CompositeEffect(this, SideEffect[State](s => callback.apply(s.asInstanceOf[NewState]))) diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala index b863a074e5..de8c766318 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala @@ -33,8 +33,8 @@ object EventSourcedProducerQueueSpec { akka.persistence.journal.inmem.test-serialization = on akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/EventSourcedDurableProducerQueueSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" """) } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala index 39ef03c04e..346933059b 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala @@ -22,8 +22,8 @@ object ReliableDeliveryWithEventSourcedProducerQueueSpec { akka.persistence.journal.inmem.test-serialization = on akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/ProducerControllerWithEventSourcedProducerQueueSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" akka.reliable-delivery.consumer-controller.flow-control-window = 20 """) } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala index d7086eccb2..8f6cf6e34a 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala @@ -25,8 +25,8 @@ object 
WorkPullingWithEventSourcedProducerQueueSpec { akka.persistence.journal.plugin = "akka.persistence.journal.inmem" akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/WorkPullingWithEventSourcedProducerQueueSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" akka.reliable-delivery.consumer-controller.flow-control-window = 20 """) } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala index abe7fd6355..7b7c8d9d7d 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala @@ -76,34 +76,33 @@ object PersistentActorCompileOnlyTest { } val behavior: Behavior[Command] = - Behaviors.setup( - ctx => - EventSourcedBehavior[Command, Event, EventsInFlight]( - persistenceId = PersistenceId.ofUniqueId("recovery-complete-id"), - emptyState = EventsInFlight(0, Map.empty), - commandHandler = (state, cmd) => - cmd match { - case DoSideEffect(data) => - Effect.persist(IntentRecorded(state.nextCorrelationId, data)).thenRun { _ => - performSideEffect(ctx.self, state.nextCorrelationId, data) - } - case AcknowledgeSideEffect(correlationId) => - Effect.persist(SideEffectAcknowledged(correlationId)) - }, - eventHandler = (state, evt) => - evt match { - case IntentRecorded(correlationId, data) => - EventsInFlight( - nextCorrelationId = correlationId + 1, - dataByCorrelationId = state.dataByCorrelationId + (correlationId -> data)) - case SideEffectAcknowledged(correlationId) => - state.copy(dataByCorrelationId = state.dataByCorrelationId - correlationId) - }).receiveSignal { - case (state, RecoveryCompleted) => - state.dataByCorrelationId.foreach { - case (correlationId, data) => performSideEffect(ctx.self, correlationId, data) - } - }) + Behaviors.setup(ctx => + EventSourcedBehavior[Command, Event, EventsInFlight]( + persistenceId = PersistenceId.ofUniqueId("recovery-complete-id"), + emptyState = EventsInFlight(0, Map.empty), + commandHandler = (state, cmd) => + cmd match { + case DoSideEffect(data) => + Effect.persist(IntentRecorded(state.nextCorrelationId, data)).thenRun { _ => + performSideEffect(ctx.self, state.nextCorrelationId, data) + } + case AcknowledgeSideEffect(correlationId) => + Effect.persist(SideEffectAcknowledged(correlationId)) + }, + eventHandler = (state, evt) => + evt match { + case IntentRecorded(correlationId, data) => + EventsInFlight( + nextCorrelationId = correlationId + 1, + dataByCorrelationId = state.dataByCorrelationId + (correlationId -> data)) + case SideEffectAcknowledged(correlationId) => + state.copy(dataByCorrelationId = state.dataByCorrelationId - correlationId) + }).receiveSignal { + case (state, RecoveryCompleted) => + state.dataByCorrelationId.foreach { + case (correlationId, data) => performSideEffect(ctx.self, correlationId, data) + } + }) } @@ -194,27 +193,26 @@ object PersistentActorCompileOnlyTest { def worker(task: Task): Behavior[Nothing] = ??? 
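
The compile-only example reflowed above demonstrates a pattern that is easy to lose in the formatting noise: persist an intent, perform the side effect only after the persist succeeds, and re-fire unacknowledged intents when RecoveryCompleted is signalled. A minimal self-contained sketch of that pattern follows; SideEffectExample and its message types are illustrative names, not code from this PR.

  import akka.actor.typed.Behavior
  import akka.persistence.typed.{ PersistenceId, RecoveryCompleted }
  import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }

  object SideEffectExample {
    sealed trait Command
    final case class Trigger(data: String) extends Command
    final case class Ack(id: Long) extends Command

    sealed trait Event
    final case class Recorded(id: Long, data: String) extends Event
    final case class Confirmed(id: Long) extends Event

    final case class State(nextId: Long, inFlight: Map[Long, String])

    // stand-in for the real side effect, e.g. telling another actor
    private def fireAndForget(id: Long, data: String): Unit = ()

    def apply(): Behavior[Command] =
      EventSourcedBehavior[Command, Event, State](
        PersistenceId.ofUniqueId("side-effect-example"),
        State(0L, Map.empty),
        commandHandler = (state, cmd) =>
          cmd match {
            case Trigger(data) =>
              // the side effect runs only after the intent is durable
              Effect.persist(Recorded(state.nextId, data)).thenRun(_ => fireAndForget(state.nextId, data))
            case Ack(id) =>
              Effect.persist(Confirmed(id))
          },
        eventHandler = (state, evt) =>
          evt match {
            case Recorded(id, data) => State(id + 1, state.inFlight + (id -> data))
            case Confirmed(id)      => state.copy(inFlight = state.inFlight - id)
          }).receiveSignal {
        case (state, RecoveryCompleted) =>
          // intents recorded but never confirmed are retried after a restart
          state.inFlight.foreach { case (id, data) => fireAndForget(id, data) }
      }
  }

The inFlight map is rebuilt from events during replay, so the RecoveryCompleted handler can safely retry without any extra bookkeeping.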
- val behavior: Behavior[Command] = Behaviors.setup( - ctx => - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId.ofUniqueId("asdf"), - emptyState = State(Nil), - commandHandler = (_, cmd) => - cmd match { - case RegisterTask(task) => - Effect.persist(TaskRegistered(task)).thenRun { _ => - val child = ctx.spawn[Nothing](worker(task), task) - // This assumes *any* termination of the child may trigger a `TaskDone`: - ctx.watchWith(child, TaskDone(task)) - } - case TaskDone(task) => Effect.persist(TaskRemoved(task)) - }, - eventHandler = (state, evt) => - evt match { - case TaskRegistered(task) => State(task :: state.tasksInFlight) - case TaskRemoved(task) => - State(state.tasksInFlight.filter(_ != task)) - })) + val behavior: Behavior[Command] = Behaviors.setup(ctx => + EventSourcedBehavior[Command, Event, State]( + persistenceId = PersistenceId.ofUniqueId("asdf"), + emptyState = State(Nil), + commandHandler = (_, cmd) => + cmd match { + case RegisterTask(task) => + Effect.persist(TaskRegistered(task)).thenRun { _ => + val child = ctx.spawn[Nothing](worker(task), task) + // This assumes *any* termination of the child may trigger a `TaskDone`: + ctx.watchWith(child, TaskDone(task)) + } + case TaskDone(task) => Effect.persist(TaskRemoved(task)) + }, + eventHandler = (state, evt) => + evt match { + case TaskRegistered(task) => State(task :: state.tasksInFlight) + case TaskRemoved(task) => + State(state.tasksInFlight.filter(_ != task)) + })) } @@ -238,14 +236,14 @@ object PersistentActorCompileOnlyTest { if (currentState == newMood) Effect.none else Effect.persist(MoodChanged(newMood)) - //#commonChainedEffects + // #commonChainedEffects // Example factoring out a chained effect to use in several places with `thenRun` val commonChainedEffects: Mood => Unit = _ => println("Command processed") // Then in a command handler: Effect .persist(Remembered("Yep")) // persist event .thenRun(commonChainedEffects) // add on common chained effect - //#commonChainedEffects + // #commonChainedEffects val commandHandler: CommandHandler[Command, Event, Mood] = { (state, cmd) => cmd match { diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala index 8085b8a657..d3accaab64 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala @@ -34,7 +34,7 @@ object BasicPersistentBehaviorCompileOnly { import akka.persistence.typed.scaladsl.RetentionCriteria object FirstExample { - //#command + // #command sealed trait Command final case class Add(data: String) extends Command case object Clear extends Command @@ -42,13 +42,13 @@ object BasicPersistentBehaviorCompileOnly { sealed trait Event final case class Added(data: String) extends Event case object Cleared extends Event - //#command + // #command - //#state + // #state final case class State(history: List[String] = Nil) - //#state + // #state - //#command-handler + // #command-handler import akka.persistence.typed.scaladsl.Effect val commandHandler: (State, Command) => Effect[Event, State] = { (state, command) => @@ -57,9 +57,9 @@ object BasicPersistentBehaviorCompileOnly { case Clear => Effect.persist(Cleared) } } - //#command-handler + // #command-handler - //#effects + // #effects def 
onCommand(subscriber: ActorRef[State], state: State, command: Command): Effect[Event, State] = { command match { case Add(data) => @@ -68,29 +68,29 @@ object BasicPersistentBehaviorCompileOnly { Effect.persist(Cleared).thenRun((newState: State) => subscriber ! newState).thenStop() } } - //#effects + // #effects - //#event-handler + // #event-handler val eventHandler: (State, Event) => State = { (state, event) => event match { case Added(data) => state.copy((data :: state.history).take(5)) case Cleared => State(Nil) } } - //#event-handler + // #event-handler - //#behavior + // #behavior def apply(id: String): Behavior[Command] = EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId(id), emptyState = State(Nil), commandHandler = commandHandler, eventHandler = eventHandler) - //#behavior + // #behavior } - //#structure + // #structure object MyPersistentBehavior { sealed trait Command sealed trait Event @@ -103,13 +103,13 @@ object BasicPersistentBehaviorCompileOnly { commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) } - //#structure + // #structure import MyPersistentBehavior._ object RecoveryBehavior { def apply(persistenceId: PersistenceId): Behavior[Command] = - //#recovery + // #recovery EventSourcedBehavior[Command, Event, State]( persistenceId = persistenceId, emptyState = State(), @@ -119,37 +119,37 @@ object BasicPersistentBehaviorCompileOnly { case (state, RecoveryCompleted) => throw new NotImplementedError("TODO: add some end-of-recovery side-effect here") } - //#recovery + // #recovery } object RecoveryDisabledBehavior { def apply(): Behavior[Command] = - //#recovery-disabled + // #recovery-disabled EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withRecovery(Recovery.disabled) - //#recovery-disabled + // #recovery-disabled } object TaggingBehavior { def apply(): Behavior[Command] = - //#tagging + // #tagging EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withTagger(_ => Set("tag1", "tag2")) - //#tagging + // #tagging } object TaggingBehavior2 { sealed trait OrderCompleted extends Event - //#tagging-query + // #tagging-query val NumberOfEntityGroups = 10 def tagEvent(entityId: String, event: Event): Set[String] = { @@ -168,29 +168,30 @@ object BasicPersistentBehaviorCompileOnly { eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withTagger(event => tagEvent(entityId, event)) } - //#tagging-query + // #tagging-query } object WrapBehavior { def apply(): Behavior[Command] = - //#wrapPersistentBehavior + // #wrapPersistentBehavior Behaviors.setup[Command] { context => EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), - commandHandler = (state, cmd) => throw new 
NotImplementedError("TODO: process the command & return an Effect"), + commandHandler = + (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .snapshotWhen((state, _, _) => { context.log.info2("Snapshot actor {} => state: {}", context.self.path.name, state) true }) } - //#wrapPersistentBehavior + // #wrapPersistentBehavior } object Supervision { def apply(): Behavior[Command] = - //#supervision + // #supervision EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), @@ -198,7 +199,7 @@ object BasicPersistentBehaviorCompileOnly { eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .onPersistFailure( SupervisorStrategy.restartWithBackoff(minBackoff = 10.seconds, maxBackoff = 60.seconds, randomFactor = 0.1)) - //#supervision + // #supervision } object BehaviorWithContext { @@ -224,7 +225,7 @@ object BasicPersistentBehaviorCompileOnly { final case class BookingCompleted(orderNr: String) extends Event - //#snapshottingEveryN + // #snapshottingEveryN EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), @@ -232,9 +233,9 @@ object BasicPersistentBehaviorCompileOnly { commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 1000, keepNSnapshots = 2)) - //#snapshottingEveryN + // #snapshottingEveryN - //#snapshottingPredicate + // #snapshottingPredicate EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), @@ -244,9 +245,9 @@ object BasicPersistentBehaviorCompileOnly { case (state, BookingCompleted(_), sequenceNumber) => true case (state, event, sequenceNumber) => false } - //#snapshottingPredicate + // #snapshottingPredicate - //#snapshotSelection + // #snapshotSelection import akka.persistence.typed.SnapshotSelectionCriteria EventSourcedBehavior[Command, Event, State]( @@ -255,9 +256,9 @@ object BasicPersistentBehaviorCompileOnly { commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withRecovery(Recovery.withSnapshotSelectionCriteria(SnapshotSelectionCriteria.none)) - //#snapshotSelection + // #snapshotSelection - //#retentionCriteria + // #retentionCriteria import akka.persistence.typed.scaladsl.Effect @@ -271,9 +272,9 @@ object BasicPersistentBehaviorCompileOnly { case (state, event, sequenceNumber) => false } .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 100, keepNSnapshots = 2)) - //#retentionCriteria + // #retentionCriteria - //#snapshotAndEventDeletes + // #snapshotAndEventDeletes EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), @@ -286,9 +287,9 @@ object BasicPersistentBehaviorCompileOnly { case (state, _: DeleteSnapshotsFailed) => // react to failure case (state, _: DeleteEventsFailed) => // react to failure } - //#snapshotAndEventDeletes + // #snapshotAndEventDeletes - //#retentionCriteriaWithSignals + // #retentionCriteriaWithSignals 
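
Since several of the surrounding snippets configure snapshotting, here is the retention rule they demonstrate in one self-contained sketch: snapshot every 100 events, keep two snapshots, delete events already covered by the kept snapshots, and react to the deletion-failure signals shown in the snapshot-and-event-deletes hunk above. RetentionExample is an illustrative name, not code from this PR; the retention-with-signals snippet from the diff continues right after this sketch.

  import akka.persistence.typed.{ DeleteEventsFailed, DeleteSnapshotsFailed, PersistenceId }
  import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior, RetentionCriteria }

  object RetentionExample {
    sealed trait Command
    sealed trait Event
    final case class State(eventCount: Long = 0L)

    def apply(): EventSourcedBehavior[Command, Event, State] =
      EventSourcedBehavior[Command, Event, State](
        PersistenceId.ofUniqueId("retention-example"),
        State(),
        commandHandler = (_, _) => Effect.none,
        eventHandler = (state, _) => state.copy(eventCount = state.eventCount + 1))
        .withRetention(
          // snapshot every 100 events, keep two snapshots, and delete the
          // events that the remaining snapshots already cover
          RetentionCriteria.snapshotEvery(numberOfEvents = 100, keepNSnapshots = 2).withDeleteEventsOnSnapshot)
        .receiveSignal {
          case (_, _: DeleteSnapshotsFailed) => // react to failure, e.g. log
          case (_, _: DeleteEventsFailed)    => // react to failure, e.g. log
        }
  }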
EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), @@ -300,24 +301,24 @@ object BasicPersistentBehaviorCompileOnly { case (state, _: SnapshotFailed) => // react to failure case (state, _: DeleteSnapshotsFailed) => // react to failure } - //#retentionCriteriaWithSignals + // #retentionCriteriaWithSignals - //#event-wrapper + // #event-wrapper case class Wrapper[T](event: T) class WrapperEventAdapter[T] extends EventAdapter[T, Wrapper[T]] { override def toJournal(e: T): Wrapper[T] = Wrapper(e) override def fromJournal(p: Wrapper[T], manifest: String): EventSeq[T] = EventSeq.single(p.event) override def manifest(event: T): String = "" } - //#event-wrapper + // #event-wrapper - //#install-event-adapter + // #install-event-adapter EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .eventAdapter(new WrapperEventAdapter[Event]) - //#install-event-adapter + // #install-event-adapter } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala index 8bd8f3b519..57ed76147d 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala @@ -17,17 +17,17 @@ import akka.persistence.typed.scaladsl.EventSourcedBehavior object BlogPostEntity { // commands, events, state defined here - //#behavior + // #behavior - //#event + // #event sealed trait Event final case class PostAdded(postId: String, content: PostContent) extends Event final case class BodyChanged(postId: String, newBody: String) extends Event final case class Published(postId: String) extends Event - //#event + // #event - //#state + // #state sealed trait State case object BlankState extends State @@ -42,30 +42,30 @@ object BlogPostEntity { final case class PublishedState(content: PostContent) extends State { def postId: String = content.postId } - //#state + // #state - //#commands + // #commands sealed trait Command - //#reply-command + // #reply-command final case class AddPost(content: PostContent, replyTo: ActorRef[StatusReply[AddPostDone]]) extends Command final case class AddPostDone(postId: String) - //#reply-command + // #reply-command final case class GetPost(replyTo: ActorRef[PostContent]) extends Command final case class ChangeBody(newBody: String, replyTo: ActorRef[Done]) extends Command final case class Publish(replyTo: ActorRef[Done]) extends Command final case class PostContent(postId: String, title: String, body: String) - //#commands + // #commands - //#behavior + // #behavior def apply(entityId: String, persistenceId: PersistenceId): Behavior[Command] = { Behaviors.setup { context => context.log.info("Starting BlogPostEntity {}", entityId) EventSourcedBehavior[Command, Event, State](persistenceId, emptyState = BlankState, commandHandler, eventHandler) } } - //#behavior + // #behavior - //#command-handler + // #command-handler private val commandHandler: (State, Command) => Effect[Event, State] = { (state, command) => state match { @@ -95,13 +95,13 @@ object BlogPostEntity { } private def addPost(cmd: AddPost): Effect[Event, State] = { - //#reply 
+ // #reply val evt = PostAdded(cmd.content.postId, cmd.content) Effect.persist(evt).thenRun { _ => // After persist is done additional side effects can be performed cmd.replyTo ! StatusReply.Success(AddPostDone(cmd.content.postId)) } - //#reply + // #reply } private def changeBody(state: DraftState, cmd: ChangeBody): Effect[Event, State] = { @@ -127,9 +127,9 @@ object BlogPostEntity { replyTo ! state.content Effect.none } - //#command-handler + // #command-handler - //#event-handler + // #event-handler private val eventHandler: (State, Event) => State = { (state, event) => state match { @@ -157,9 +157,9 @@ object BlogPostEntity { throw new IllegalStateException(s"unexpected event [$event] in state [$state]") } } - //#event-handler + // #event-handler - //#behavior + // #behavior // commandHandler and eventHandler defined here } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala index 42b6491b82..032391b85f 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala @@ -17,9 +17,9 @@ import akka.persistence.typed.state.scaladsl.DurableStateBehavior object BlogPostEntityDurableState { // commands, state defined here - //#behavior + // #behavior - //#state + // #state sealed trait State case object BlankState extends State @@ -34,30 +34,30 @@ object BlogPostEntityDurableState { final case class PublishedState(content: PostContent) extends State { def postId: String = content.postId } - //#state + // #state - //#commands + // #commands sealed trait Command - //#reply-command + // #reply-command final case class AddPost(content: PostContent, replyTo: ActorRef[StatusReply[AddPostDone]]) extends Command final case class AddPostDone(postId: String) - //#reply-command + // #reply-command final case class GetPost(replyTo: ActorRef[PostContent]) extends Command final case class ChangeBody(newBody: String, replyTo: ActorRef[Done]) extends Command final case class Publish(replyTo: ActorRef[Done]) extends Command final case class PostContent(postId: String, title: String, body: String) - //#commands + // #commands - //#behavior + // #behavior def apply(entityId: String, persistenceId: PersistenceId): Behavior[Command] = { Behaviors.setup { context => context.log.info("Starting BlogPostEntityDurableState {}", entityId) DurableStateBehavior[Command, State](persistenceId, emptyState = BlankState, commandHandler) } } - //#behavior + // #behavior - //#command-handler + // #command-handler private val commandHandler: (State, Command) => Effect[State] = { (state, command) => state match { @@ -87,12 +87,12 @@ object BlogPostEntityDurableState { } private def addPost(cmd: AddPost): Effect[State] = { - //#reply + // #reply Effect.persist(DraftState(cmd.content)).thenRun { _ => // After persist is done additional side effects can be performed cmd.replyTo ! StatusReply.Success(AddPostDone(cmd.content.postId)) } - //#reply + // #reply } private def changeBody(state: DraftState, cmd: ChangeBody): Effect[State] = { @@ -117,8 +117,8 @@ object BlogPostEntityDurableState { replyTo ! 
state.content Effect.none } - //#command-handler - //#behavior + // #command-handler + // #behavior // commandHandler defined here } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala index bd3d658550..0931b707d6 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala @@ -25,19 +25,19 @@ import akka.serialization.jackson.CborSerializable @nowarn object DurableStatePersistentBehaviorCompileOnly { object FirstExample { - //#command + // #command sealed trait Command[ReplyMessage] extends CborSerializable final case object Increment extends Command[Nothing] final case class IncrementBy(value: Int) extends Command[Nothing] final case class GetValue(replyTo: ActorRef[State]) extends Command[State] final case object Delete extends Command[Nothing] - //#command + // #command - //#state + // #state final case class State(value: Int) extends CborSerializable - //#state + // #state - //#command-handler + // #command-handler import akka.persistence.typed.state.scaladsl.Effect val commandHandler: (State, Command[_]) => Effect[State] = (state, command) => @@ -47,19 +47,19 @@ object DurableStatePersistentBehaviorCompileOnly { case GetValue(replyTo) => Effect.reply(replyTo)(state) case Delete => Effect.delete[State]() } - //#command-handler + // #command-handler - //#behavior + // #behavior def counter(id: String): DurableStateBehavior[Command[_], State] = { DurableStateBehavior.apply[Command[_], State]( persistenceId = PersistenceId.ofUniqueId(id), emptyState = State(0), commandHandler = commandHandler) } - //#behavior + // #behavior } - //#structure + // #structure object MyPersistentCounter { sealed trait Command[ReplyMessage] extends CborSerializable @@ -73,13 +73,13 @@ object DurableStatePersistentBehaviorCompileOnly { (state, command) => throw new NotImplementedError("TODO: process the command & return an Effect")) } } - //#structure + // #structure import MyPersistentCounter._ object MyPersistentCounterWithReplies { - //#effects + // #effects sealed trait Command[ReplyMessage] extends CborSerializable final case class IncrementWithConfirmation(replyTo: ActorRef[Done]) extends Command[Done] final case class GetValue(replyTo: ActorRef[State]) extends Command[State] @@ -100,7 +100,7 @@ object DurableStatePersistentBehaviorCompileOnly { Effect.reply(replyTo)(state) }) } - //#effects + // #effects } object BehaviorWithContext { @@ -123,13 +123,13 @@ object DurableStatePersistentBehaviorCompileOnly { object TaggingBehavior { def apply(): Behavior[Command[_]] = - //#tagging + // #tagging DurableStateBehavior[Command[_], State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(0), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect")) .withTag("tag1") - //#tagging + // #tagging } object WrapBehavior { @@ -137,7 +137,7 @@ object DurableStatePersistentBehaviorCompileOnly { import akka.persistence.typed.state.scaladsl.DurableStateBehavior.CommandHandler def apply(): Behavior[Command[_]] = - //#wrapPersistentBehavior + // #wrapPersistentBehavior Behaviors.setup[Command[_]] { context => DurableStateBehavior[Command[_], State]( persistenceId = 
PersistenceId.ofUniqueId("abc"), @@ -147,6 +147,6 @@ object DurableStatePersistentBehaviorCompileOnly { Effect.none }) } - //#wrapPersistentBehavior + // #wrapPersistentBehavior } } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/PersistentFsmToTypedMigrationSpec.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/PersistentFsmToTypedMigrationSpec.scala index d5babb4569..1b4e0028ac 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/PersistentFsmToTypedMigrationSpec.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/PersistentFsmToTypedMigrationSpec.scala @@ -51,24 +51,24 @@ object ShoppingCartBehavior { def apply(pid: PersistenceId) = behavior(pid) - //#commands + // #commands sealed trait Command case class AddItem(item: Item) extends Command case object Buy extends Command case object Leave extends Command case class GetCurrentCart(replyTo: ActorRef[ShoppingCart]) extends Command private case object Timeout extends Command - //#commands + // #commands - //#state + // #state sealed trait State case class LookingAround(cart: ShoppingCart) extends State case class Shopping(cart: ShoppingCart) extends State case class Inactive(cart: ShoppingCart) extends State case class Paid(cart: ShoppingCart) extends State - //#state + // #state - //#snapshot-adapter + // #snapshot-adapter val persistentFSMSnapshotAdapter: SnapshotAdapter[State] = PersistentFSMMigration.snapshotAdapter[State] { case (stateIdentifier, data, _) => val cart = data.asInstanceOf[ShoppingCart] @@ -80,9 +80,9 @@ object ShoppingCartBehavior { case id => throw new IllegalStateException(s"Unexpected state identifier $id") } } - //#snapshot-adapter + // #snapshot-adapter - //#event-adapter + // #event-adapter class PersistentFsmEventAdapter extends EventAdapter[DomainEvent, Any] { override def toJournal(e: DomainEvent): Any = e override def manifest(event: DomainEvent): String = "" @@ -101,11 +101,11 @@ object ShoppingCartBehavior { } } - //#event-adapter + // #event-adapter val StateTimeout = "state-timeout" - //#command-handler + // #command-handler def commandHandler(timers: TimerScheduler[Command])(state: State, command: Command): Effect[DomainEvent, State] = state match { case LookingAround(cart) => @@ -152,9 +152,9 @@ object ShoppingCartBehavior { Effect.none } } - //#command-handler + // #command-handler - //#event-handler + // #event-handler def eventHandler(state: State, event: DomainEvent): State = { state match { case la @ LookingAround(cart) => @@ -178,7 +178,7 @@ object ShoppingCartBehavior { case Paid(_) => state // no events after paid } } - //#event-handler + // #event-handler private def behavior(pid: PersistenceId): Behavior[Command] = Behaviors.withTimers[Command] { timers => @@ -189,7 +189,7 @@ object ShoppingCartBehavior { eventHandler) .snapshotAdapter(persistentFSMSnapshotAdapter) .eventAdapter(new PersistentFsmEventAdapter()) - //#signal-handler + // #signal-handler .receiveSignal { case (state, RecoveryCompleted) => state match { @@ -198,7 +198,7 @@ object ShoppingCartBehavior { case _ => } } - //#signal-handler + // #signal-handler } } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala index 9520cf97c7..7ce446f097 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala +++ 
b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala @@ -14,7 +14,7 @@ import akka.persistence.typed.scaladsl.EventSourcedBehavior object StashingExample { - //#stashing + // #stashing object TaskManager { sealed trait Command @@ -81,5 +81,5 @@ object StashingExample { } } } - //#stashing + // #stashing } diff --git a/akka-persistence/src/main/scala-3/akka/persistence/TraitOrder.scala b/akka-persistence/src/main/scala-3/akka/persistence/TraitOrder.scala index e044ee356b..0e6985f770 100644 --- a/akka-persistence/src/main/scala-3/akka/persistence/TraitOrder.scala +++ b/akka-persistence/src/main/scala-3/akka/persistence/TraitOrder.scala @@ -16,4 +16,4 @@ private[persistence] object TraitOrder { // No-op on Scala 3 def checkBefore(clazz: Class[_], one: Class[_], other: Class[_]): Unit = () -} \ No newline at end of file +} diff --git a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala index aca0305dba..c0bda5cfe4 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala @@ -625,14 +625,15 @@ private[persistence] trait Eventsourced } private val recoveryBehavior: Receive = { - val _receiveRecover = try receiveRecover - catch { - case NonFatal(e) => - try onRecoveryFailure(e, Some(e)) - finally context.stop(self) - returnRecoveryPermit() - Actor.emptyBehavior - } + val _receiveRecover = + try receiveRecover + catch { + case NonFatal(e) => + try onRecoveryFailure(e, Some(e)) + finally context.stop(self) + returnRecoveryPermit() + Actor.emptyBehavior + } { case PersistentRepr(payload, _) if recoveryRunning && _receiveRecover.isDefinedAt(payload) => @@ -685,32 +686,33 @@ private[persistence] trait Eventsourced } try message match { - case LoadSnapshotResult(snapshot, toSnr) => - loadSnapshotResult(snapshot, toSnr) + case LoadSnapshotResult(snapshot, toSnr) => + loadSnapshotResult(snapshot, toSnr) - case LoadSnapshotFailed(cause) => - if (isSnapshotOptional) { - log.info( - "Snapshot load error for persistenceId [{}]. Replaying all events since snapshot-is-optional=true", - persistenceId) - loadSnapshotResult(snapshot = None, recovery.toSequenceNr) - } else { - timeoutCancellable.cancel() - try onRecoveryFailure(cause, event = None) + case LoadSnapshotFailed(cause) => + if (isSnapshotOptional) { + log.info( + "Snapshot load error for persistenceId [{}]. 
Replaying all events since snapshot-is-optional=true", + persistenceId) + loadSnapshotResult(snapshot = None, recovery.toSequenceNr) + } else { + timeoutCancellable.cancel() + try onRecoveryFailure(cause, event = None) + finally context.stop(self) + returnRecoveryPermit() + } + + case RecoveryTick(true) => + try onRecoveryFailure( + new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"), + event = None) finally context.stop(self) returnRecoveryPermit() - } - case RecoveryTick(true) => - try onRecoveryFailure( - new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"), - event = None) - finally context.stop(self) - returnRecoveryPermit() - - case other => - stashInternally(other) - } catch { + case other => + stashInternally(other) + } + catch { case NonFatal(e) => returnRecoveryPermit() throw e @@ -749,49 +751,50 @@ private[persistence] trait Eventsourced override def stateReceive(receive: Receive, message: Any) = try message match { - case ReplayedMessage(p) => - try { - eventSeenInInterval = true - updateLastSequenceNr(p) - Eventsourced.super.aroundReceive(recoveryBehavior, p) - } catch { - case NonFatal(t) => - timeoutCancellable.cancel() - try onRecoveryFailure(t, Some(p.payload)) - finally context.stop(self) - returnRecoveryPermit() - } - case RecoverySuccess(highestJournalSeqNr) => - timeoutCancellable.cancel() - onReplaySuccess() // callback for subclass implementation - val highestSeqNr = Math.max(highestJournalSeqNr, lastSequenceNr) - sequenceNr = highestSeqNr - setLastSequenceNr(highestSeqNr) - _recoveryRunning = false - try Eventsourced.super.aroundReceive(recoveryBehavior, RecoveryCompleted) - finally transitToProcessingState() // in finally in case exception and resume strategy - // if exception from RecoveryCompleted the permit is returned in below catch - returnRecoveryPermit() - case ReplayMessagesFailure(cause) => - timeoutCancellable.cancel() - try onRecoveryFailure(cause, event = None) - finally context.stop(self) - returnRecoveryPermit() - case RecoveryTick(false) if !eventSeenInInterval => - timeoutCancellable.cancel() - try onRecoveryFailure( - new RecoveryTimedOut( - s"Recovery timed out, didn't get event within $timeout, highest sequence number seen $lastSequenceNr"), - event = None) - finally context.stop(self) - returnRecoveryPermit() - case RecoveryTick(false) => - eventSeenInInterval = false - case RecoveryTick(true) => - // snapshot tick, ignore - case other => - stashInternally(other) - } catch { + case ReplayedMessage(p) => + try { + eventSeenInInterval = true + updateLastSequenceNr(p) + Eventsourced.super.aroundReceive(recoveryBehavior, p) + } catch { + case NonFatal(t) => + timeoutCancellable.cancel() + try onRecoveryFailure(t, Some(p.payload)) + finally context.stop(self) + returnRecoveryPermit() + } + case RecoverySuccess(highestJournalSeqNr) => + timeoutCancellable.cancel() + onReplaySuccess() // callback for subclass implementation + val highestSeqNr = Math.max(highestJournalSeqNr, lastSequenceNr) + sequenceNr = highestSeqNr + setLastSequenceNr(highestSeqNr) + _recoveryRunning = false + try Eventsourced.super.aroundReceive(recoveryBehavior, RecoveryCompleted) + finally transitToProcessingState() // in finally in case exception and resume strategy + // if exception from RecoveryCompleted the permit is returned in below catch + returnRecoveryPermit() + case ReplayMessagesFailure(cause) => + timeoutCancellable.cancel() + try onRecoveryFailure(cause, event = None) + finally context.stop(self) + 
returnRecoveryPermit() + case RecoveryTick(false) if !eventSeenInInterval => + timeoutCancellable.cancel() + try onRecoveryFailure( + new RecoveryTimedOut( + s"Recovery timed out, didn't get event within $timeout, highest sequence number seen $lastSequenceNr"), + event = None) + finally context.stop(self) + returnRecoveryPermit() + case RecoveryTick(false) => + eventSeenInInterval = false + case RecoveryTick(true) => + // snapshot tick, ignore + case other => + stashInternally(other) + } + catch { case NonFatal(e) => returnRecoveryPermit() throw e diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala index 6ed2eab3b3..8999ee283e 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala @@ -91,7 +91,7 @@ trait PersistenceIdentity { //#persistence-identity trait PersistenceRecovery { - //#persistence-recovery + // #persistence-recovery /** * Called when the persistent actor is started for the first time. * The returned [[Recovery]] object defines how the Actor will recover its persistent state before @@ -101,7 +101,7 @@ trait PersistenceRecovery { */ def recovery: Recovery = Recovery() - //#persistence-recovery + // #persistence-recovery } trait PersistenceStash extends Stash with StashFactory { @@ -400,18 +400,19 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { log.debug(s"Create plugin: $pluginActorName $pluginClassName") val pluginClass = system.dynamicAccess.getClassFor[Any](pluginClassName).get val pluginDispatcherId = pluginConfig.getString("plugin-dispatcher") - val pluginActorArgs: List[AnyRef] = try { - Reflect.findConstructor(pluginClass, List(pluginConfig, configPath)) // will throw if not found - List(pluginConfig, configPath) - } catch { - case NonFatal(_) => - try { - Reflect.findConstructor(pluginClass, List(pluginConfig)) // will throw if not found - List(pluginConfig) - } catch { - case NonFatal(_) => Nil - } // otherwise use empty constructor - } + val pluginActorArgs: List[AnyRef] = + try { + Reflect.findConstructor(pluginClass, List(pluginConfig, configPath)) // will throw if not found + List(pluginConfig, configPath) + } catch { + case NonFatal(_) => + try { + Reflect.findConstructor(pluginClass, List(pluginConfig)) // will throw if not found + List(pluginConfig) + } catch { + case NonFatal(_) => Nil + } // otherwise use empty constructor + } val pluginActorProps = Props(Deploy(dispatcher = pluginDispatcherId), pluginClass, pluginActorArgs) system.systemActorOf(pluginActorProps, pluginActorName) } @@ -473,7 +474,7 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { numberOfRanges * rangeSize == numberOfSlices, s"numberOfRanges [$numberOfRanges] must be a whole number divisor of numberOfSlices [$numberOfSlices].") (0 until numberOfRanges).map { i => - (i * rangeSize until i * rangeSize + rangeSize) + i * rangeSize until i * rangeSize + rangeSize }.toVector } diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala index d5657987a2..16e16ce72f 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala @@ -42,10 +42,10 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per // only check that all persistenceIds are equal when there's 
more than one in the Seq if (payload match { - case l: List[PersistentRepr] => l.tail.nonEmpty // avoids calling .size - case v: Vector[PersistentRepr] => v.size > 1 - case _ => true // some other collection type, let's just check - }) payload.foreach { pr => + case l: List[PersistentRepr] => l.tail.nonEmpty // avoids calling .size + case v: Vector[PersistentRepr] => v.size > 1 + case _ => true // some other collection type, let's just check + }) payload.foreach { pr => if (pr.persistenceId != payload.head.persistenceId) throw new IllegalArgumentException( "AtomicWrite must contain messages for the same persistenceId, " + diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala index 95d5083420..b399be5ee4 100644 --- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala +++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala @@ -50,7 +50,7 @@ private[akka] class SnapshotAfter(config: Config) extends Extension { */ val isSnapshotAfterSeqNo: Long => Boolean = snapshotAfterValue match { case Some(snapShotAfterValue) => (seqNo: Long) => seqNo % snapShotAfterValue == 0 - case None => (_: Long) => false //always false, if snapshotAfter is not specified in config + case None => (_: Long) => false // always false, if snapshotAfter is not specified in config } } @@ -64,7 +64,6 @@ private[akka] class SnapshotAfter(config: Config) extends Extension { * Persistence execution order is: persist -> wait for ack -> apply state. * Incoming messages are deferred until the state is applied. * State Data is constructed based on domain events, according to user's implementation of applyEvent function. - * */ @deprecated("Use EventSourcedBehavior", "2.6.0") trait PersistentFSM[S <: FSMState, D, E] extends PersistentActor with PersistentFSMBase[S, D, E] with ActorLogging { @@ -141,16 +140,16 @@ trait PersistentFSM[S <: FSMState, D, E] extends PersistentActor with Persistent override private[akka] def applyState(nextState: State): Unit = { var eventsToPersist: immutable.Seq[Any] = nextState.domainEvents.toList - //Prevent StateChangeEvent persistence when staying in the same state, except when state defines a timeout + // Prevent StateChangeEvent persistence when staying in the same state, except when state defines a timeout if (nextState.notifies || nextState.timeout.nonEmpty) { eventsToPersist = eventsToPersist :+ StateChangeEvent(nextState.stateName.identifier, nextState.timeout) } if (eventsToPersist.isEmpty) { - //If there are no events to persist, just apply the state + // If there are no events to persist, just apply the state super.applyState(nextState) } else { - //Persist the events and apply the new state after all event handlers were executed + // Persist the events and apply the new state after all event handlers were executed var nextData: D = stateData var handlersExecutedCounter = 0 @@ -490,7 +489,6 @@ object PersistentFSM { * Java API: compatible with lambda expressions * * Persistent Finite State Machine actor abstract base class. 
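
The guard in applyState above is worth stating outside the diff. A paraphrase with illustrative stand-in types (these are not the real PersistentFSM internals):

  import scala.concurrent.duration.FiniteDuration

  object FsmPersistRule {
    // stand-ins for PersistentFSM's state and event types, illustration only
    final case class NextState(identifier: String, notifies: Boolean, timeout: Option[FiniteDuration])
    final case class StateChangeEvent(identifier: String, timeout: Option[FiniteDuration])

    // A StateChangeEvent is appended only when the transition notifies an
    // actual state change, or when the target state defines a timeout that
    // must be re-armed after recovery.
    def eventsToPersist(domainEvents: List[Any], next: NextState): List[Any] =
      if (next.notifies || next.timeout.nonEmpty)
        domainEvents :+ StateChangeEvent(next.identifier, next.timeout)
      else
        domainEvents
  }

Persisting the state-change record only when it matters keeps same-state transitions from growing the journal, while timeouts still survive a restart.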
- * */ @deprecated("Use EventSourcedBehavior", "2.6.0") abstract class AbstractPersistentFSM[S <: FSMState, D, E] @@ -534,7 +532,6 @@ abstract class AbstractPersistentFSM[S <: FSMState, D, E] * Java API: compatible with lambda expressions * * Persistent Finite State Machine actor abstract base class with FSM Logging - * */ @nowarn("msg=deprecated") @deprecated("Use EventSourcedBehavior", "2.6.0") diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala index e3fc9638a5..faec7a1807 100644 --- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala +++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala @@ -94,7 +94,6 @@ import akka.util.unused * cancelTimer("tock") * isTimerActive("tock") * - * */ @deprecated("Use EventSourcedBehavior", "2.6.0") trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging { @@ -627,7 +626,6 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging /** * Stackable trait for [[akka.actor.FSM]] which adds a rolling event log and * debug logging capabilities (analogous to [[akka.event.LoggingReceive]]). - * */ @deprecated("Use EventSourcedBehavior", "2.6.0") trait LoggingPersistentFSM[S, D, E] extends PersistentFSMBase[S, D, E] { this: Actor => @@ -697,7 +695,6 @@ trait LoggingPersistentFSM[S, D, E] extends PersistentFSMBase[S, D, E] { this: A /** * Java API: compatible with lambda expressions - * */ @deprecated("Use EventSourcedBehavior", "2.6.0") object AbstractPersistentFSMBase { @@ -717,7 +714,6 @@ object AbstractPersistentFSMBase { * Java API: compatible with lambda expressions * * Finite State Machine actor abstract base class. - * */ @deprecated("Use EventSourcedBehavior", "2.6.0") abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D, E] { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala index e4680bed2d..712d7facc1 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala @@ -12,7 +12,7 @@ import akka.persistence.PersistentRepr * Asynchronous message replay and sequence number recovery interface. */ trait AsyncRecovery { - //#journal-plugin-api + // #journal-plugin-api /** * Plugin API: asynchronously replays persistent messages. Implementations replay * a message by calling `replayCallback`. The returned future must be completed @@ -68,5 +68,5 @@ trait AsyncRecovery { * snapshot or `0L` if no snapshot is used. */ def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] - //#journal-plugin-api + // #journal-plugin-api } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala index 714b27c8ab..8d0b64ea3f 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala @@ -198,7 +198,7 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { } } - //#journal-plugin-api + // #journal-plugin-api /** * Plugin API: asynchronously writes a batch (`Seq`) of persistent messages to the * journal. 
@@ -283,10 +283,9 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { * * Allows plugin implementers to use `f pipeTo self` and * handle additional messages for implementing advanced features - * */ def receivePluginInternal: Actor.Receive = Actor.emptyBehavior - //#journal-plugin-api + // #journal-plugin-api } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala index d3ae827199..0c1c98cd45 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala @@ -31,7 +31,7 @@ trait EventAdapter extends WriteEventAdapter with ReadEventAdapter * */ trait WriteEventAdapter { - //#event-adapter-api + // #event-adapter-api /** * Return the manifest (type hint) that will be provided in the `fromJournal` method. * Use `""` if manifest is not needed. @@ -52,7 +52,7 @@ trait WriteEventAdapter { * @return the adapted event object, possibly the same object if no adaptation was performed */ def toJournal(event: Any): Any - //#event-adapter-api + // #event-adapter-api } /** @@ -66,7 +66,7 @@ trait WriteEventAdapter { * */ trait ReadEventAdapter { - //#event-adapter-api + // #event-adapter-api /** * Convert a event from its journal model to the applications domain model. * @@ -81,7 +81,7 @@ trait ReadEventAdapter { * @return sequence containing the adapted events (possibly zero) which will be delivered to the PersistentActor */ def fromJournal(event: Any, manifest: String): EventSeq - //#event-adapter-api + // #event-adapter-api } sealed abstract class EventSeq { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala index 37dd5e7330..0a211f5072 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala @@ -85,7 +85,7 @@ private[akka] object EventAdapters { } require( adapterNames(boundAdapter.toString), s"$fqn was bound to undefined event-adapter: $boundAdapter (bindings: ${boundToAdapters - .mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})") + .mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})") // A Map of handler from alias to implementation (i.e. 
class implementing akka.serialization.Serializer) // For example this defines a handler named 'country': `"country" -> com.example.comain.CountryTagsAdapter` @@ -154,12 +154,12 @@ private[akka] object EventAdapters { */ private def sort[T](in: Iterable[(Class[_], T)]): immutable.Seq[(Class[_], T)] = in.foldLeft(new ArrayBuffer[(Class[_], T)](in.size)) { (buf, ca) => - buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { - case -1 => buf.append(ca) - case x => buf.insert(x, ca) - } - buf + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) } + buf + } .to(immutable.Seq) private final def configToMap(config: Config, path: String): Map[String, String] = { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala index 8aaf94a945..8e280c12fd 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala @@ -80,7 +80,8 @@ private[akka] class ReplayFilter( if (r.persistent.writerUuid == writerUuid) { // from same writer if (r.persistent.sequenceNr < seqNo) { - val errMsg = s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] as " + + val errMsg = + s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] as " + s"the sequenceNr should be equal to or greater than already-processed event [sequenceNr=${seqNo}, writerUUID=${writerUuid}] from the same writer, for the same persistenceId [${r.persistent.persistenceId}]. " + "Perhaps, events were journaled out of sequence, or duplicate persistenceId for different entities?" logIssue(errMsg) @@ -98,7 +99,8 @@ private[akka] class ReplayFilter( } else if (oldWriters.contains(r.persistent.writerUuid)) { // from old writer - val errMsg = s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}]. " + + val errMsg = + s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}]. " + s"There was already a newer writer whose last replayed event was [sequenceNr=${seqNo}, writerUUID=${writerUuid}] for the same persistenceId [${r.persistent.persistenceId}]." + "Perhaps, the old writer kept journaling messages after the new writer created, or duplicate persistenceId for different entities?" logIssue(errMsg) @@ -124,7 +126,8 @@ private[akka] class ReplayFilter( while (iter.hasNext()) { val msg = iter.next() if (msg.persistent.sequenceNr >= seqNo) { - val errMsg = s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] from a new writer. " + + val errMsg = + s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] from a new writer. " + s"An older writer already sent an event [sequenceNr=${msg.persistent.sequenceNr}, writerUUID=${msg.persistent.writerUuid}] whose sequence number was equal or greater for the same persistenceId [${r.persistent.persistenceId}]. " + "Perhaps, the new writer journaled the event out of sequence, or duplicate persistenceId for different entities?" 
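
Stepping back from the replay-filter messages for a moment: the adapter bindings mentioned a few hunks above (the 'country' handler) follow the WriteEventAdapter contract whose markers were reformatted earlier. An illustrative adapter, assuming the common tagging use case rather than anything in this PR:

  import akka.persistence.journal.{ Tagged, WriteEventAdapter }

  class CountryTagsAdapter extends WriteEventAdapter {
    // no manifest needed when the journal model is self-describing
    override def manifest(event: Any): String = ""

    // wrap the domain event so the journal stores it with a tag
    override def toJournal(event: Any): Any = Tagged(event, Set("country"))
  }

toJournal may also return the event unchanged; wrapping it in Tagged is typically what makes it visible to tag-based queries.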
logIssue(errMsg) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala index a1447cc85c..db69815a36 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala @@ -154,9 +154,9 @@ object InmemJournal { } messages = messages + (messages.get(p.persistenceId) match { - case Some(ms) => p.persistenceId -> (ms :+ pr) - case None => p.persistenceId -> Vector(pr) - }) + case Some(ms) => p.persistenceId -> (ms :+ pr) + case None => p.persistenceId -> Vector(pr) + }) highestSequenceNumbers = highestSequenceNumbers.updated(p.persistenceId, math.max(highestSequenceNr(p.persistenceId), p.sequenceNr)) } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala index 9d33e2a494..2a812abe3d 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala @@ -20,9 +20,10 @@ abstract class AsyncRecovery extends SAsyncReplay with AsyncRecoveryPlugin { thi final def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( replayCallback: (PersistentRepr) => Unit) = - doAsyncReplayMessages(persistenceId, fromSequenceNr, toSequenceNr, max, new Consumer[PersistentRepr] { - def accept(p: PersistentRepr) = replayCallback(p) - }).map(_ => ()) + doAsyncReplayMessages(persistenceId, fromSequenceNr, toSequenceNr, max, + new Consumer[PersistentRepr] { + def accept(p: PersistentRepr) = replayCallback(p) + }).map(_ => ()) final def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = doAsyncReadHighestSequenceNr(persistenceId, fromSequenceNr: Long).map(_.longValue) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala index ac97dd52f9..af93dc2f09 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala @@ -57,7 +57,6 @@ private[persistence] trait LeveldbCompaction extends Actor with ActorLogging wit * considered to be those which include sequence numbers up to 'toSeqNr' AND whose size is equal to N (the compaction * interval). This rule implies that if 'toSeqNr' spans an incomplete portion of a rightmost segment, then * that segment will be omitted from the pending compaction, and will be included into the next one. 
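
The segment rule in the comment above reduces to simple arithmetic. The helper below is an illustration of that rule, not part of CompactionSegmentManagement:

  object CompactionMath {
    // only whole segments of `interval` events at or below `toSeqNr` are
    // compacted; a partially filled rightmost segment waits for the next round
    def compactionLimit(toSeqNr: Long, interval: Long): Long =
      (toSeqNr / interval) * interval
  }

For example, with a compaction interval of 1000, compactionLimit(2500, 1000) is 2000, so events 2001 to 2500 are left for a later compaction pass.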
- * */ private[persistence] trait CompactionSegmentManagement { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala index cff03f0d74..e39f546456 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala @@ -64,7 +64,7 @@ private[persistence] trait LeveldbRecovery extends AsyncRecovery { this: Leveldb val nextEntry = iter.peekNext() val nextKey = keyFromBytes(nextEntry.getKey) if (key.persistenceId == nextKey.persistenceId && key.sequenceNr == nextKey.sequenceNr && isDeletionKey( - nextKey)) { + nextKey)) { iter.next() true } else false diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala index 11f30bbe2f..6baf60c3a0 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala @@ -114,28 +114,29 @@ private[persistence] trait LeveldbStore def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = try Future.successful { - withBatch { batch => - val nid = numericId(persistenceId) + withBatch { batch => + val nid = numericId(persistenceId) - // seek to first existing message - val fromSequenceNr = withIterator { iter => - val startKey = Key(nid, 1L, 0) - iter.seek(keyToBytes(startKey)) - if (iter.hasNext) keyFromBytes(iter.peekNext().getKey).sequenceNr else Long.MaxValue - } - - if (fromSequenceNr != Long.MaxValue) { - val toSeqNr = math.min(toSequenceNr, readHighestSequenceNr(nid)) - var sequenceNr = fromSequenceNr - while (sequenceNr <= toSeqNr) { - batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) - sequenceNr += 1 + // seek to first existing message + val fromSequenceNr = withIterator { iter => + val startKey = Key(nid, 1L, 0) + iter.seek(keyToBytes(startKey)) + if (iter.hasNext) keyFromBytes(iter.peekNext().getKey).sequenceNr else Long.MaxValue } - self ! LeveldbCompaction.TryCompactLeveldb(persistenceId, toSeqNr) + if (fromSequenceNr != Long.MaxValue) { + val toSeqNr = math.min(toSequenceNr, readHighestSequenceNr(nid)) + var sequenceNr = fromSequenceNr + while (sequenceNr <= toSeqNr) { + batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) + sequenceNr += 1 + } + + self ! LeveldbCompaction.TryCompactLeveldb(persistenceId, toSeqNr) + } } } - } catch { + catch { case NonFatal(e) => Future.failed(e) } diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala index 9dc69e418e..5f049e2300 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala @@ -123,7 +123,7 @@ trait SnapshotStore extends Actor with ActorLogging { private def tryReceivePluginInternal(evt: Any): Unit = if (receivePluginInternal.isDefinedAt(evt)) receivePluginInternal(evt) - //#snapshot-store-plugin-api + // #snapshot-store-plugin-api /** * Plugin API: asynchronously loads a snapshot. 
@@ -176,5 +176,5 @@ trait SnapshotStore extends Actor with ActorLogging { * handle additional messages for implementing advanced features */ def receivePluginInternal: Actor.Receive = Actor.emptyBehavior - //#snapshot-store-plugin-api + // #snapshot-store-plugin-api } diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala index db97915f2f..0afef8cc74 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala @@ -90,7 +90,8 @@ object AtLeastOnceDeliverySpec { persistAsync(AcceptedReq(payload, destination)) { evt => updateState(evt) sender() ! ReqAck - } else + } + else persist(AcceptedReq(payload, destination)) { evt => updateState(evt) sender() ! ReqAck @@ -103,7 +104,8 @@ object AtLeastOnceDeliverySpec { if (async) persistAsync(ReqDone(id)) { evt => updateState(evt) - } else + } + else persist(ReqDone(id)) { evt => updateState(evt) } @@ -195,7 +197,7 @@ class AtLeastOnceDeliverySpec "AtLeastOnceDelivery" must { List(true, false).foreach { deliverUsingActorSelection => - s"deliver messages in order when nothing is lost (using actorSelection: $deliverUsingActorSelection)" taggedAs (TimingTest) in { + s"deliver messages in order when nothing is lost (using actorSelection: $deliverUsingActorSelection)" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val destinations = Map("A" -> system.actorOf(destinationProps(probeA.ref)).path) @@ -206,7 +208,7 @@ class AtLeastOnceDeliverySpec probeA.expectNoMessage(1.second) } - s"re-deliver lost messages (using actorSelection: $deliverUsingActorSelection)" taggedAs (TimingTest) in { + s"re-deliver lost messages (using actorSelection: $deliverUsingActorSelection)" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -247,7 +249,7 @@ class AtLeastOnceDeliverySpec expectMsgType[Failure[_]].toString should include("not supported") } - "re-deliver lost messages after restart" taggedAs (TimingTest) in { + "re-deliver lost messages after restart" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -281,7 +283,7 @@ class AtLeastOnceDeliverySpec probeA.expectNoMessage(1.second) } - "re-send replayed deliveries with an 'initially in-order' strategy, before delivering fresh messages" taggedAs (TimingTest) in { + "re-send replayed deliveries with an 'initially in-order' strategy, before delivering fresh messages" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -318,7 +320,7 @@ class AtLeastOnceDeliverySpec probeA.expectNoMessage(1.second) } - "restore state from snapshot" taggedAs (TimingTest) in { + "restore state from snapshot" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -356,7 +358,7 @@ class AtLeastOnceDeliverySpec probeA.expectNoMessage(1.second) } - "warn about unconfirmed messages" taggedAs (TimingTest) in { + "warn about unconfirmed messages" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val probeB = TestProbe() @@ -378,7 +380,7 @@ class AtLeastOnceDeliverySpec system.stop(snd) } - "re-deliver many lost messages" taggedAs (TimingTest) in { + 
"re-deliver many lost messages" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val probeB = TestProbe() @@ -410,7 +412,7 @@ class AtLeastOnceDeliverySpec (1 to N).map(n => "c-" + n).toSet) } - "limit the number of messages redelivered at once" taggedAs (TimingTest) in { + "limit the number of messages redelivered at once" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) diff --git a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala index 7e34197e98..0da36e6749 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala @@ -15,7 +15,8 @@ import akka.testkit.ImplicitSender object EventAdapterSpec { - final val JournalModelClassName = classOf[EventAdapterSpec].getCanonicalName + "$" + classOf[JournalModel].getSimpleName + final val JournalModelClassName = + classOf[EventAdapterSpec].getCanonicalName + "$" + classOf[JournalModel].getSimpleName trait JournalModel { def payload: Any def tags: immutable.Set[String] diff --git a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala index 0066cde5e0..6fe42fe9a8 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala @@ -34,7 +34,7 @@ object PerformanceSpec { def stopMeasure(): Double = { stopTime = System.nanoTime - (NanoToSecond * numberOfMessages / (stopTime - startTime)) + NanoToSecond * numberOfMessages / (stopTime - startTime) } } diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala index 91c8ff7e86..c7e1664cef 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala @@ -109,16 +109,17 @@ trait PersistenceMatchers { final class IndependentlyOrdered(prefixes: immutable.Seq[String]) extends Matcher[immutable.Seq[Any]] { override def apply(_left: immutable.Seq[Any]) = { val left = _left.map(_.toString) - val mapped = left.groupBy(l => prefixes.indexWhere(p => l.startsWith(p))) - (-1) // ignore other messages - val results = for { - (pos, seq) <- mapped - nrs = seq.map(_.replaceFirst(prefixes(pos), "").toInt) - sortedNrs = nrs.sorted - if nrs != sortedNrs - } yield MatchResult( - false, - s"""Messages sequence with prefix ${prefixes(pos)} was not sorted! Was: $seq"""", - s"""Messages sequence with prefix ${prefixes(pos)} was sorted! Was: $seq"""") + val mapped = left.groupBy(l => prefixes.indexWhere(p => l.startsWith(p))) - -1 // ignore other messages + val results = + for { + (pos, seq) <- mapped + nrs = seq.map(_.replaceFirst(prefixes(pos), "").toInt) + sortedNrs = nrs.sorted + if nrs != sortedNrs + } yield MatchResult( + false, + s"""Messages sequence with prefix ${prefixes(pos)} was not sorted! Was: $seq"""", + s"""Messages sequence with prefix ${prefixes(pos)} was sorted! 
Was: $seq"""") if (results.forall(_.matches)) MatchResult(true, "", "") else results.find(r => !r.matches).get diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala index c3460c36ca..34320b640c 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala @@ -100,14 +100,14 @@ class ThrowExceptionStrategyPersistentActorBoundedStashingSpec persistentActor ! GetState expectMsg(Nil) - //barrier for stash + // barrier for stash persistentActor ! Cmd("a") - //internal stash overflow + // internal stash overflow (1 to (capacity + 1)).foreach(persistentActor ! Cmd(_)) - //after PA stopped, all stashed messages forward to deadletters - //the message triggering the overflow is lost, so we get one less message than we sent + // after the PA has stopped, all stashed messages are forwarded to dead letters + // the message triggering the overflow is lost, so we get one less message than we sent (1 to capacity).foreach(i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) // send another message to the now dead actor and make sure that it goes to dead letters @@ -125,21 +125,21 @@ class DiscardStrategyPersistentActorBoundedStashingSpec awaitAssert(SteppingInmemJournal.getRef("persistence-bounded-stash"), 3.seconds) val journal = SteppingInmemJournal.getRef("persistence-bounded-stash") - //initial read highest + // initial read highest SteppingInmemJournal.step(journal) // make sure it's fully started first persistentActor ! GetState expectMsg(Nil) - //barrier for stash + // barrier for stash persistentActor ! Cmd("a") - //internal stash overflow after 10 + // internal stash overflow after 10 (1 to (2 * capacity)).foreach(persistentActor ! Cmd(_)) - //so, 11 to 20 discard to deadletter + // so, 11 to 20 are discarded to dead letters ((1 + capacity) to (2 * capacity)).foreach(i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) - //allow "a" and 1 to 10 write complete + // allow the writes of "a" and 1 to 10 to complete (1 to (1 + capacity)).foreach(_ => SteppingInmemJournal.step(journal)) persistentActor ! GetState @@ -157,21 +157,21 @@ class ReplyToStrategyPersistentActorBoundedStashingSpec awaitAssert(SteppingInmemJournal.getRef("persistence-bounded-stash"), 3.seconds) val journal = SteppingInmemJournal.getRef("persistence-bounded-stash") - //initial read highest + // initial read highest SteppingInmemJournal.step(journal) // make sure it's fully started first persistentActor ! GetState expectMsg(Nil) - //barrier for stash + // barrier for stash persistentActor ! Cmd("a") - //internal stash overflow after 10 + // internal stash overflow after 10 (1 to (2 * capacity)).foreach(persistentActor ! Cmd(_)) - //so, 11 to 20 reply to with "Reject" String + // so, 11 to 20 are replied to with the "Reject" String ((1 + capacity) to (2 * capacity)).foreach(_ => expectMsg("RejectToStash")) - //allow "a" and 1 to 10 write complete + // allow the writes of "a" and 1 to 10 to complete (1 to (1 + capacity)).foreach(_ => SteppingInmemJournal.step(journal)) persistentActor !
GetState diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala index 1b386af737..1c43b6a8bf 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala @@ -770,7 +770,7 @@ object PersistentActorSpec { probe ! inner Thread.sleep(1000) // really long wait here... // the next incoming command must be handled by the following function - context.become({ case _ => sender() ! "done" }) + context.become { case _ => sender() ! "done" } } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala index 36f55f23ff..f9493a855e 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala @@ -131,7 +131,7 @@ class SnapshotFailureRobustnessSpec expectMsgPF() { case (SnapshotMetadata(`persistenceId`, 1, timestamp), state) => state should ===("blahonga") - timestamp should be > (0L) + timestamp should be > 0L } expectMsg("kablama-2") expectMsg(RecoveryCompleted) diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala index eb26d041c2..ee5a3453c2 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala @@ -97,7 +97,7 @@ class SnapshotSerializationSpec expectMsgPF() { case (SnapshotMetadata(`persistenceId`, 0, timestamp), state) => state should ===(new MySnapshot("blahonga")) - timestamp should be > (0L) + timestamp should be > 0L } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala index f713fe8ce7..d0360a76f1 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala @@ -111,7 +111,7 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, timestamp), state) => state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) - timestamp should be > (0L) + timestamp should be > 0L } expectMsg("e-5") expectMsg("f-6") @@ -135,7 +135,7 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => state should ===(List("a-1", "b-2").reverse) - timestamp should be > (0L) + timestamp should be > 0L } expectMsg("c-3") expectMsg(RecoveryCompleted) @@ -150,7 +150,7 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, timestamp), state) => state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) - timestamp should be > (0L) + timestamp should be > 0L } expectMsg(RecoveryCompleted) expectMsg("done") @@ -163,7 +163,7 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => state should 
===(List("a-1", "b-2").reverse) - timestamp should be > (0L) + timestamp should be > 0L } expectMsg("c-3") expectMsg("d-4") @@ -179,7 +179,7 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => state should ===(List("a-1", "b-2").reverse) - timestamp should be > (0L) + timestamp should be > 0L } expectMsg("c-3") expectMsg(RecoveryCompleted) diff --git a/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala index c997020259..f90c5684a0 100644 --- a/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala @@ -70,9 +70,10 @@ object TimerPersistentActorSpec { BoxedUnit.UNIT case msg => timers.startSingleTimer("key", Scheduled(msg, sender()), Duration.Zero) - persist(msg, new Procedure[Any] { - override def apply(evt: Any): Unit = () - }) + persist(msg, + new Procedure[Any] { + override def apply(evt: Any): Unit = () + }) BoxedUnit.UNIT }) } diff --git a/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala b/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala index a729fb067b..85382ae6e9 100644 --- a/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala @@ -23,7 +23,7 @@ import akka.testkit._ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) with ImplicitSender { import PersistentFSMSpec._ - //Dummy report actor, for tests that don't need it + // Dummy report actor, for tests that don't need it val dummyReportActorRef = TestProbe().ref "PersistentFSM" must { @@ -274,13 +274,13 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) val persistentEventsStreamer = system.actorOf(PersistentEventsStreamer.props(persistenceId, testActor)) expectMsg(ItemAdded(Item("1", "Shirt", 59.99f))) - expectMsgType[StateChangeEvent] //because a timeout is defined, State Change is persisted + expectMsgType[StateChangeEvent] // because a timeout is defined, State Change is persisted expectMsg(ItemAdded(Item("2", "Shoes", 89.99f))) - expectMsgType[StateChangeEvent] //because a timeout is defined, State Change is persisted + expectMsgType[StateChangeEvent] // because a timeout is defined, State Change is persisted expectMsg(ItemAdded(Item("3", "Coat", 119.99f))) - expectMsgType[StateChangeEvent] //because a timeout is defined, State Change is persisted + expectMsgType[StateChangeEvent] // because a timeout is defined, State Change is persisted watch(persistentEventsStreamer) persistentEventsStreamer ! PoisonPill @@ -325,7 +325,7 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) fsmRef ! PoisonPill expectTerminated(fsmRef) - //Check that PersistentFSM recovers in the correct state + // Check that PersistentFSM recovers in the correct state val recoveredFsmRef = system.actorOf(WebStoreCustomerFSM.props(persistenceId, dummyReportActorRef)) recoveredFsmRef ! GetCurrentCart expectMsg(NonEmptyShoppingCart(List(shirt, shoes, coat))) @@ -334,7 +334,7 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) recoveredFsmRef ! 
PoisonPill expectTerminated(recoveredFsmRef) - //Check that PersistentFSM uses snapshot during recovery + // Check that PersistentFSM uses snapshot during recovery val persistentEventsStreamer = system.actorOf(PersistentEventsStreamer.props(persistenceId, testActor)) expectMsgPF() { @@ -382,8 +382,8 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) // Otherwise, stateData sent to this probe is already updated probe.expectMsg("SeqNo=3, StateData=List(3, 2, 1)") - fsmRef ! "4x" //changes the state to Persist4xAtOnce, also updates SeqNo although nothing is persisted - fsmRef ! 10 //Persist4xAtOnce = persist 10, 4x times + fsmRef ! "4x" // changes the state to Persist4xAtOnce, also updates SeqNo although nothing is persisted + fsmRef ! 10 // Persist4xAtOnce = persist 10, 4x times // snapshot-after = 3, but the SeqNo is not multiple of 3, // as saveStateSnapshot() is called at the end of persistent event "batch" = 4x of 10's. probe.expectMsg("SeqNo=8, StateData=List(10, 10, 10, 10, 3, 2, 1)") @@ -400,7 +400,7 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) @nowarn("msg=deprecated") object PersistentFSMSpec { - //#customer-states + // #customer-states sealed trait UserState extends FSMState case object LookingAround extends UserState { override def identifier: String = "Looking Around" @@ -414,9 +414,9 @@ object PersistentFSMSpec { case object Paid extends UserState { override def identifier: String = "Paid" } - //#customer-states + // #customer-states - //#customer-states-data + // #customer-states-data case class Item(id: String, name: String, price: Float) sealed trait ShoppingCart { @@ -431,25 +431,25 @@ object PersistentFSMSpec { def addItem(item: Item) = NonEmptyShoppingCart(items :+ item) def empty() = EmptyShoppingCart } - //#customer-states-data + // #customer-states-data - //#customer-commands + // #customer-commands sealed trait Command case class AddItem(item: Item) extends Command case object Buy extends Command case object Leave extends Command case object GetCurrentCart extends Command - //#customer-commands + // #customer-commands - //#customer-domain-events + // #customer-domain-events sealed trait DomainEvent case class ItemAdded(item: Item) extends DomainEvent case object OrderExecuted extends DomainEvent case object OrderDiscarded extends DomainEvent case object CustomerInactive extends DomainEvent - //#customer-domain-events + // #customer-domain-events - //Side effects - report events to be sent to some "Report Actor" + // Side effects - report events to be sent to some "Report Actor" sealed trait ReportEvent case class PurchaseWasMade(items: Seq[Item]) extends ReportEvent case object ShoppingCardDiscarded extends ReportEvent @@ -484,7 +484,7 @@ object PersistentFSMSpec { override def persistenceId = _persistenceId - //#customer-fsm-body + // #customer-fsm-body startWith(LookingAround, EmptyShoppingCart) when(LookingAround) { @@ -498,24 +498,24 @@ object PersistentFSMSpec { case Event(AddItem(item), _) => stay().applying(ItemAdded(item)).forMax(1 seconds) case Event(Buy, _) => - //#customer-andthen-example + // #customer-andthen-example goto(Paid).applying(OrderExecuted).andThen { case NonEmptyShoppingCart(items) => reportActor ! 
PurchaseWasMade(items) - //#customer-andthen-example + // #customer-andthen-example saveStateSnapshot() case EmptyShoppingCart => saveStateSnapshot() - //#customer-andthen-example + // #customer-andthen-example } - //#customer-andthen-example + // #customer-andthen-example case Event(Leave, _) => - //#customer-snapshot-example + // #customer-snapshot-example stop().applying(OrderDiscarded).andThen { case _ => reportActor ! ShoppingCardDiscarded saveStateSnapshot() } - //#customer-snapshot-example + // #customer-snapshot-example case Event(GetCurrentCart, data) => stay().replying(data) case Event(StateTimeout, _) => @@ -536,7 +536,7 @@ object PersistentFSMSpec { case Event(GetCurrentCart, data) => stay().replying(data) } - //#customer-fsm-body + // #customer-fsm-body /** * Override this handler to define the action on Domain Event @@ -544,7 +544,7 @@ object PersistentFSMSpec { * @param event domain event to apply * @param cartBeforeEvent state data of the previous state */ - //#customer-apply-event + // #customer-apply-event override def applyEvent(event: DomainEvent, cartBeforeEvent: ShoppingCart): ShoppingCart = { event match { case ItemAdded(item) => cartBeforeEvent.addItem(item) @@ -553,7 +553,7 @@ object PersistentFSMSpec { case CustomerInactive => cartBeforeEvent } } - //#customer-apply-event + // #customer-apply-event } object WebStoreCustomerFSM { diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala index 6fdf5ef7fc..b8bfd4871b 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala @@ -42,22 +42,24 @@ class ChaosJournal extends AsyncWriteJournal { override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = try Future.successful { - if (shouldFail(writeFailureRate)) throw new WriteFailedException(messages.flatMap(_.payload)) - else - for (a <- messages) yield { - a.payload.foreach(add) - AsyncWriteJournal.successUnit - } - } catch { + if (shouldFail(writeFailureRate)) throw new WriteFailedException(messages.flatMap(_.payload)) + else + for (a <- messages) yield { + a.payload.foreach(add) + AsyncWriteJournal.successUnit + } + } + catch { case NonFatal(e) => Future.failed(e) } override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = { try Future.successful { - (1L to toSequenceNr).foreach { snr => - del(persistenceId, snr) + (1L to toSequenceNr).foreach { snr => + del(persistenceId, snr) + } } - } catch { + catch { case NonFatal(e) => Future.failed(e) } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala index a60b19bff0..9eceb9c277 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala @@ -304,7 +304,7 @@ abstract class RemotingFeaturesSpec(val multiNodeConfig: RemotingFeaturesConfig) "A remote round robin pool" must { s"${if (useUnsafe) "be instantiated on remote node and communicate through its RemoteActorRef" - else "not be instantiated on remote node and communicate through its LocalActorRef "} " in { + else "not be instantiated on remote node and communicate through its LocalActorRef "} " in { runOn(first, second, third) { 
enterBarrier("start", "broadcast-end", "end") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala index 2f7c24e38f..a22c328ade 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala @@ -92,8 +92,8 @@ abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: RemoteQuarantinePie awaitAssert { system.actorSelection(RootActorPath(secondAddress) / "user" / "subject") ! "identify" val (uidSecond, subjectSecond) = expectMsgType[(Long, ActorRef)](1.second) - uidSecond should not be (uidFirst) - subjectSecond should not be (subjectFirst) + uidSecond should not be uidFirst + subjectSecond should not be subjectFirst } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala index 8351c4db2e..2c03f36ef9 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala @@ -153,7 +153,8 @@ abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymen // The quarantine of node 2, where the Parent lives, should cause the Hello child to be stopped: expectMsg("PostStop") expectNoMessage() - } else expectNoMessage(sleepAfterKill) + } + else expectNoMessage(sleepAfterKill) awaitAssert(node(second), 10.seconds, 100.millis) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala index 82b73f5e2b..3903cc4b5c 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala @@ -25,6 +25,4 @@ abstract class RemotingMultiNodeSpec(config: MultiNodeConfig) with Suite with STMultiNodeSpec with ImplicitSender - with DefaultTimeout { self: MultiNodeSpec => - -} + with DefaultTimeout { self: MultiNodeSpec => } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala index 10ae659a8d..9cc7502ab2 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala @@ -107,7 +107,7 @@ abstract class TransportFailSpec extends RemotingMultiNodeSpec(TransportFailConf def identify(role: RoleName, actorName: String): ActorRef = { val p = TestProbe() - (system.actorSelection(node(role) / "user" / actorName)).tell(Identify(actorName), p.ref) + system.actorSelection(node(role) / "user" / actorName).tell(Identify(actorName), p.ref) p.expectMsgType[ActorIdentity](remainingOrDefault).ref.get } @@ -143,10 +143,10 @@ abstract class TransportFailSpec extends RemotingMultiNodeSpec(TransportFailConf var subject2: ActorRef = null awaitAssert({ - within(1.second) { - subject2 = identify(second, "subject2") - } - }, max = 5.seconds) + within(1.second) { + subject2 = identify(second, "subject2") + } + }, max = 5.seconds) watch(subject2) quarantineProbe.expectNoMessage(1.seconds) subject2 ! 
"hello2" diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala index ddf01d5cb7..ac5ca777f6 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala @@ -112,8 +112,7 @@ object MaxThroughputSpec extends MultiNodeConfig { class Receiver(reporter: RateReporter, payloadSize: Int, numSenders: Int) extends Actor { private var c = 0L private var endMessagesMissing = numSenders - private var correspondingSender - : ActorRef = null // the Actor which send the Start message will also receive the report + private var correspondingSender: ActorRef = null // the Actor which send the Start message will also receive the report def receive = { case msg: Array[Byte] => @@ -246,19 +245,20 @@ object MaxThroughputSpec extends MultiNodeConfig { val waitingForEndResult: Receive = { case EndResult(totalReceived) => val took = NANOSECONDS.toMillis(System.nanoTime - startTime) - val throughput = (totalReceived * 1000.0 / took) + val throughput = totalReceived * 1000.0 / took reporter.reportResults(s"=== ${reporter.testName} ${self.path.name}: " + - f"throughput ${throughput * testSettings.senderReceiverPairs}%,.0f msg/s, " + - f"${throughput * payloadSize * testSettings.senderReceiverPairs}%,.0f bytes/s (payload), " + - f"${throughput * totalSize(context.system) * testSettings.senderReceiverPairs}%,.0f bytes/s (total" + - (if (RARP(context.system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) ",compression" else "") + "), " + - (if (testSettings.senderReceiverPairs == 1) s"dropped ${totalMessages - totalReceived}, " else "") + - s"max round-trip $maxRoundTripMillis ms, " + - s"burst size $burstSize, " + - s"payload size $payloadSize, " + - s"total size ${totalSize(context.system)}, " + - s"$took ms to deliver $totalReceived messages.") + f"throughput ${throughput * testSettings.senderReceiverPairs}%,.0f msg/s, " + + f"${throughput * payloadSize * testSettings.senderReceiverPairs}%,.0f bytes/s (payload), " + + f"${throughput * totalSize(context.system) * testSettings.senderReceiverPairs}%,.0f bytes/s (total" + + (if (RARP(context.system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) ",compression" + else "") + "), " + + (if (testSettings.senderReceiverPairs == 1) s"dropped ${totalMessages - totalReceived}, " else "") + + s"max round-trip $maxRoundTripMillis ms, " + + s"burst size $burstSize, " + + s"payload size $payloadSize, " + + s"total size ${totalSize(context.system)}, " + + s"$took ms to deliver $totalReceived messages.") plotRef ! 
PlotResult().add(testName, throughput * payloadSize * testSettings.senderReceiverPairs / 1024 / 1024) context.stop(self) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala index 74ed69f586..850b6a864d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala @@ -126,7 +126,7 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo freshSystem .actorSelection(RootActorPath(firstAddress) / "user" / "subject") .tell(Identify("subject"), probe.ref) - probe.expectMsgType[ActorIdentity](5.seconds).ref should not be (None) + probe.expectMsgType[ActorIdentity](5.seconds).ref should not be None // Now the other system will be able to pass, too freshSystem.actorOf(Props[Subject](), "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala index 722ec33785..8d01228d2e 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala @@ -128,9 +128,9 @@ abstract class SurviveInboundStreamRestartWithCompressionInFlightSpec // we poke the remote system, awaiting its inbound stream recovery, then it should reply awaitAssert({ - sendToB ! "alive-again" - expectMsg(300.millis, s"${sendToB.path.name}-alive-again") - }, max = 5.seconds, interval = 500.millis) + sendToB ! "alive-again" + expectMsg(300.millis, s"${sendToB.path.name}-alive-again") + }, max = 5.seconds, interval = 500.millis) // we continue sending messages using the "old table". 
// if a new table was being built, it would cause the b to be compressed as 1 causing a wrong reply to come back diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala index 5ec2aba08b..36a71105a7 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala @@ -7,22 +7,23 @@ package akka.remote.artery import java.util.concurrent.TimeUnit.SECONDS class TestRateReporter(name: String) - extends RateReporter(SECONDS.toNanos(1), new RateReporter.Reporter { - override def onReport( - messagesPerSec: Double, - bytesPerSec: Double, - totalMessages: Long, - totalBytes: Long): Unit = { - if (totalBytes > 0) { - println( - name + - f": ${messagesPerSec}%,.0f msgs/sec, ${bytesPerSec}%,.0f bytes/sec, " + - f"totals ${totalMessages}%,d messages ${totalBytes / (1024 * 1024)}%,d MB") - } else { - println( - name + - f": ${messagesPerSec}%,.0f msgs/sec " + - f"total ${totalMessages}%,d messages") + extends RateReporter(SECONDS.toNanos(1), + new RateReporter.Reporter { + override def onReport( + messagesPerSec: Double, + bytesPerSec: Double, + totalMessages: Long, + totalBytes: Long): Unit = { + if (totalBytes > 0) { + println( + name + + f": ${messagesPerSec}%,.0f msgs/sec, ${bytesPerSec}%,.0f bytes/sec, " + + f"totals ${totalMessages}%,d messages ${totalBytes / (1024 * 1024)}%,d MB") + } else { + println( + name + + f": ${messagesPerSec}%,.0f msgs/sec " + + f"total ${totalMessages}%,d messages") + } } - } - }) {} + }) {} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala index 0b973102e2..9f0927b60f 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala @@ -86,9 +86,9 @@ class RemoteRandomSpec(multiNodeConfig: RemoteRandomConfig) actor ! "hit" } - val replies: Map[Address, Int] = (receiveWhile(5.seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5.seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala index 14e6b83c18..83ce02ee70 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala @@ -102,11 +102,11 @@ class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig) actor ! 
"hit" } - val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5 seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => info(s"reply from $ref") ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } @@ -190,9 +190,9 @@ class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig) actor ! "hit" } - val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5 seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala index b59018a5aa..bd7ffbff81 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala @@ -98,9 +98,9 @@ class RemoteScatterGatherSpec(multiNodeConfig: RemoteScatterGatherConfig) actor ! "hit" } - val replies: Map[Address, Int] = (receiveWhile(5.seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5.seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 3b730596a9..dc3183532c 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -45,10 +45,10 @@ class TestConductorSpec extends RemotingMultiNodeSpec(TestConductorMultiJvmSpec) "enter a barrier" taggedAs LongRunningTest in { runOn(leader) { system.actorOf(Props(new Actor { - def receive = { - case x => testActor ! x; sender() ! x - } - }).withDeploy(Deploy.local), "echo") + def receive = { + case x => testActor ! x; sender() ! 
x + } + }).withDeploy(Deploy.local), "echo") } enterBarrier("name") diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index 341bf3dbed..9c2c1a328b 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -138,7 +138,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { if thr == ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, thr.data.deadline), B) => case x => fail( - "Expected " + Failed(barrier, ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, null), B)) + " but got " + x) + "Expected " + Failed(barrier, + ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, null), B)) + " but got " + x) } } @@ -161,7 +162,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { if thr == ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, thr.data.deadline), B) => case x => fail( - "Expected " + Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, null), B)) + " but got " + x) + "Expected " + Failed(barrier, + ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, null), B)) + " but got " + x) } } @@ -198,12 +200,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { msg match { case Failed(_, thr: BarrierEmpty) if thr == BarrierEmpty( - Data(Set(), "", Nil, thr.data.deadline), - "cannot remove RoleName(a): no client to remove") => + Data(Set(), "", Nil, thr.data.deadline), + "cannot remove RoleName(a): no client to remove") => case x => fail("Expected " + Failed( barrier, - BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove")) + " but got " + x) + BarrierEmpty(Data(Set(), "", Nil, null), + "cannot remove RoleName(a): no client to remove")) + " but got " + x) } barrier ! 
NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("bar9", None)) @@ -225,7 +228,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { if thr == BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, thr.data.deadline)) => case x => fail( - "Expected " + Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, null))) + " but got " + x) + "Expected " + Failed(barrier, + BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, null))) + " but got " + x) } } } diff --git a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala index 153f1f9a00..a6ec0ee195 100644 --- a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala +++ b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala @@ -121,8 +121,8 @@ final case class AckedSendBuffer[T <: HasSequenceNumber]( if (newNacked.size < ack.nacks.size) throw new ResendUnfulfillableException else this.copy(nonAcked = nonAcked.filter { m => - m.seq > ack.cumulativeAck - }, nacked = newNacked) + m.seq > ack.cumulativeAck + }, nacked = newNacked) } /** diff --git a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala index ed9b4c043b..87c8c17cc2 100644 --- a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala @@ -46,7 +46,7 @@ class DeadlineFailureDetector(val acceptableHeartbeatPause: FiniteDuration, val require(heartbeatInterval > Duration.Zero, "failure-detector.heartbeat-interval must be > 0 s") private val deadlineMillis = acceptableHeartbeatPause.toMillis + heartbeatInterval.toMillis - @volatile private var heartbeatTimestamp = 0L //not used until active (first heartbeat) + @volatile private var heartbeatTimestamp = 0L // not used until active (first heartbeat) @volatile private var active = false override def isAvailable: Boolean = isAvailable(clock()) diff --git a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala index 9bbb651d12..a5e0b7b8cb 100644 --- a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala @@ -15,7 +15,6 @@ import scala.collection.immutable.Map * * @param detectorFactory * By-name parameter that returns the failure detector instance to be used by a newly registered resource - * */ class DefaultFailureDetectorRegistry[A](detectorFactory: () => FailureDetector) extends FailureDetectorRegistry[A] { diff --git a/akka-remote/src/main/scala/akka/remote/Endpoint.scala b/akka-remote/src/main/scala/akka/remote/Endpoint.scala index b5f1c098bc..802723b4cb 100644 --- a/akka-remote/src/main/scala/akka/remote/Endpoint.scala +++ b/akka-remote/src/main/scala/akka/remote/Endpoint.scala @@ -100,7 +100,7 @@ private[remote] class DefaultMessageDispatcher( payload match { case sel: ActorSelectionMessage => if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) || - sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) + sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) log.debug( LogMarker.Security, "operating in UntrustedMode, dropping inbound actor selection to [{}], " + @@ -885,7 +885,7 @@ private[remote] class EndpointWriter( if 
(provider.remoteSettings.LogSend && log.isDebugEnabled) { def msgLog = s"RemoteMessage: [${s.message}] to [${s.recipient}]<+[${s.recipient.path}] from [${s.senderOption - .getOrElse(extendedSystem.deadLetters)}]" + .getOrElse(extendedSystem.deadLetters)}]" log.debug("sending message {}", msgLog) } @@ -1117,9 +1117,9 @@ private[remote] class EndpointReader( if (receiveBuffers.putIfAbsent(key, ResendState(uid, ackedReceiveBuffer)) ne null) updateSavedState(key, receiveBuffers.get(key)) } else if (!receiveBuffers.replace( - key, - expectedState, - merge(ResendState(uid, ackedReceiveBuffer), expectedState))) + key, + expectedState, + merge(ResendState(uid, ackedReceiveBuffer), expectedState))) updateSavedState(key, receiveBuffers.get(key)) } diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala index 4043a5ae10..5340364618 100644 --- a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala @@ -72,10 +72,10 @@ private[akka] object FailureDetectorLoader { .createInstanceFor[FailureDetector]( fqcn, List(classOf[Config] -> config, classOf[EventStream] -> system.eventStream)) - .recover({ + .recover { case e => throw new ConfigurationException(s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e) - }) + } .get } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 7351a854a1..15e64021c4 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -149,7 +149,6 @@ private[akka] object RemoteActorRefProvider { * Depending on this class is not supported, only the [[akka.actor.ActorRefProvider]] interface is supported. * * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. - * */ private[akka] class RemoteActorRefProvider( val systemName: String, @@ -161,7 +160,8 @@ private[akka] class RemoteActorRefProvider( val remoteSettings: RemoteSettings = new RemoteSettings(settings.config) - private[akka] final val hasClusterOrUseUnsafe = settings.HasCluster || remoteSettings.UseUnsafeRemoteFeaturesWithoutCluster + private[akka] final val hasClusterOrUseUnsafe = + settings.HasCluster || remoteSettings.UseUnsafeRemoteFeaturesWithoutCluster private val warnOnUnsafeRemote = !settings.HasCluster && @@ -250,7 +250,8 @@ private[akka] class RemoteActorRefProvider( case ArterySettings.AeronUpd => new ArteryAeronUdpTransport(system, this) case ArterySettings.Tcp => new ArteryTcpTransport(system, this, tlsEnabled = false) case ArterySettings.TlsTcp => new ArteryTcpTransport(system, this, tlsEnabled = true) - } else new Remoting(system, this)) + } + else new Remoting(system, this)) _internals = internals remotingTerminator ! internals @@ -345,14 +346,16 @@ private[akka] class RemoteActorRefProvider( if (warnOnUnsafeRemote) log.warning(message) else log.debug(message) - /** Logs if deathwatch message is intentionally dropped. To disable + /** + * Logs if deathwatch message is intentionally dropped. To disable * warnings set `akka.remote.warn-unsafe-watch-outside-cluster` to `off` * or use Akka Cluster. 
*/ private[akka] def warnIfUnsafeDeathwatchWithoutCluster(watchee: ActorRef, watcher: ActorRef, action: String): Unit = warnOnUnsafe(s"Dropped remote $action: disabled for [$watcher -> $watchee]") - /** If `warnOnUnsafeRemote`, this logs a warning if `actorOf` falls back to `LocalActorRef` + /** + * If `warnOnUnsafeRemote`, this logs a warning if `actorOf` falls back to `LocalActorRef` * versus creating a `RemoteActorRef`. Override to log a more granular reason if using * `RemoteActorRefProvider` as a superclass. */ @@ -416,7 +419,8 @@ private[akka] class RemoteActorRefProvider( case "user" | "system" => deployer.lookup(elems.drop(1)) case "remote" => lookupRemotes(elems) case _ => None - } else None + } + else None val deployment = { deploy.toList ::: lookup.toList match { @@ -710,7 +714,7 @@ private[akka] class RemoteActorRef private[akka] ( def sendSystemMessage(message: SystemMessage): Unit = try { - //send to remote, unless watch message is intercepted by the remoteWatcher + // send to remote, unless watch message is intercepted by the remoteWatcher message match { case Watch(watchee, watcher) => if (isWatchIntercepted(watchee, watcher)) @@ -720,7 +724,7 @@ private[akka] class RemoteActorRef private[akka] ( else provider.warnIfUnsafeDeathwatchWithoutCluster(watchee, watcher, "Watch") - //Unwatch has a different signature, need to pattern match arguments against InternalActorRef + // Unwatch has a different signature, need to pattern match arguments against InternalActorRef case Unwatch(watchee: InternalActorRef, watcher: InternalActorRef) => if (isWatchIntercepted(watchee, watcher)) provider.remoteWatcher.foreach(_ ! RemoteWatcher.UnwatchRemote(watchee, watcher)) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index a84972eb2b..00d9ed85a6 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -159,75 +159,76 @@ private[akka] class RemoteSystemDaemon( override def !(msg: Any)(implicit sender: ActorRef = Actor.noSender): Unit = try msg match { - case message: DaemonMsg => - log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address) - message match { - case DaemonMsgCreate(_, _, path, _) if untrustedMode => - log.debug("does not accept deployments (untrusted) for [{}]", path) // TODO add security marker? + case message: DaemonMsg => + log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address) + message match { + case DaemonMsgCreate(_, _, path, _) if untrustedMode => + log.debug("does not accept deployments (untrusted) for [{}]", path) // TODO add security marker? - case DaemonMsgCreate(props, deploy, path, supervisor) if allowListEnabled => - val name = props.clazz.getCanonicalName - if (remoteDeploymentAllowList.contains(name)) - doCreateActor(message, props, deploy, path, supervisor) - else { - val ex = - new NotAllowedClassRemoteDeploymentAttemptException(props.actorClass(), remoteDeploymentAllowList) - log.error( - LogMarker.Security, - ex, - "Received command to create remote Actor, but class [{}] is not white-listed! 
" + - "Target path: [{}]", - props.actorClass(), - path) - } - case DaemonMsgCreate(props, deploy, path, supervisor) => - doCreateActor(message, props, deploy, path, supervisor) - } - - case sel: ActorSelectionMessage => - val (concatenatedChildNames, m) = { - val iter = sel.elements.iterator - // find child elements, and the message to send, which is a remaining ActorSelectionMessage - // in case of SelectChildPattern, otherwise the actual message of the selection - @tailrec def rec(acc: List[String]): (List[String], Any) = - if (iter.isEmpty) - (acc.reverse, sel.msg) - else { - iter.next() match { - case SelectChildName(name) => rec(name :: acc) - case SelectParent if acc.isEmpty => rec(acc) - case SelectParent => rec(acc.tail) - case pat: SelectChildPattern => (acc.reverse, sel.copy(elements = pat +: iter.toVector)) + case DaemonMsgCreate(props, deploy, path, supervisor) if allowListEnabled => + val name = props.clazz.getCanonicalName + if (remoteDeploymentAllowList.contains(name)) + doCreateActor(message, props, deploy, path, supervisor) + else { + val ex = + new NotAllowedClassRemoteDeploymentAttemptException(props.actorClass(), remoteDeploymentAllowList) + log.error( + LogMarker.Security, + ex, + "Received command to create remote Actor, but class [{}] is not white-listed! " + + "Target path: [{}]", + props.actorClass(), + path) } - } - rec(Nil) - } - getChild(concatenatedChildNames.iterator) match { - case Nobody => - val emptyRef = - new EmptyLocalActorRef(system.provider, path / sel.elements.map(_.toString), system.eventStream) - emptyRef.tell(sel, sender) - case child => - child.tell(m, sender) - } + case DaemonMsgCreate(props, deploy, path, supervisor) => + doCreateActor(message, props, deploy, path, supervisor) + } - case Identify(messageId) => sender ! ActorIdentity(messageId, Some(this)) + case sel: ActorSelectionMessage => + val (concatenatedChildNames, m) = { + val iter = sel.elements.iterator + // find child elements, and the message to send, which is a remaining ActorSelectionMessage + // in case of SelectChildPattern, otherwise the actual message of the selection + @tailrec def rec(acc: List[String]): (List[String], Any) = + if (iter.isEmpty) + (acc.reverse, sel.msg) + else { + iter.next() match { + case SelectChildName(name) => rec(name :: acc) + case SelectParent if acc.isEmpty => rec(acc) + case SelectParent => rec(acc.tail) + case pat: SelectChildPattern => (acc.reverse, sel.copy(elements = pat +: iter.toVector)) + } + } + rec(Nil) + } + getChild(concatenatedChildNames.iterator) match { + case Nobody => + val emptyRef = + new EmptyLocalActorRef(system.provider, path / sel.elements.map(_.toString), system.eventStream) + emptyRef.tell(sel, sender) + case child => + child.tell(m, sender) + } - case TerminationHook => - terminating.switchOn { - terminationHookDoneWhenNoChildren() - foreachChild { system.stop } - } + case Identify(messageId) => sender ! 
ActorIdentity(messageId, Some(this)) - case AddressTerminated(address) => - foreachChild { - case a: InternalActorRef if a.getParent.path.address == address => system.stop(a) - case _ => // skip, this child doesn't belong to the terminated address - } + case TerminationHook => + terminating.switchOn { + terminationHookDoneWhenNoChildren() + foreachChild { system.stop } + } - case unknown => log.warning(LogMarker.Security, "Unknown message [{}] received by [{}]", unknown, this) + case AddressTerminated(address) => + foreachChild { + case a: InternalActorRef if a.getParent.path.address == address => system.stop(a) + case _ => // skip, this child doesn't belong to the terminated address + } - } catch { + case unknown => log.warning(LogMarker.Security, "Unknown message [{}] received by [{}]", unknown, this) + + } + catch { case NonFatal(e) => log.error(e, "exception while processing remote command [{}] from [{}]", msg, sender) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala index 6751d98e81..6d4c39cb00 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala @@ -90,7 +90,6 @@ private[akka] object RemoteWatcher { * * For bi-directional watch between two nodes the same thing will be established in * both directions, but independent of each other. - * */ @InternalApi private[akka] class RemoteWatcher( @@ -207,7 +206,8 @@ private[akka] class RemoteWatcher( } } - /** Returns true if either has cluster or `akka.remote.use-unsafe-remote-features-outside-cluster` + /** + * Returns true if either has cluster or `akka.remote.use-unsafe-remote-features-outside-cluster` * is enabled. Can be overridden when using RemoteWatcher as a superclass. */ protected def shouldWatch(@unused watchee: InternalActorRef): Boolean = { diff --git a/akka-remote/src/main/scala/akka/remote/Remoting.scala b/akka-remote/src/main/scala/akka/remote/Remoting.scala index 8931d764fe..9b310a2ba3 100644 --- a/akka-remote/src/main/scala/akka/remote/Remoting.scala +++ b/akka-remote/src/main/scala/akka/remote/Remoting.scala @@ -111,7 +111,7 @@ private[remote] object Remoting { case None => throw new RemoteTransportException( s"No transport is loaded for protocol: [${remote.protocol}], available protocols: [${transportMapping.keys - .mkString(", ")}]", + .mkString(", ")}]", null) } } @@ -677,7 +677,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) address, quarantineUid, Deadline.now + settings.QuarantineDuration) - case _ => //the quarantine uid has lost the race with some failure, do nothing + case _ => // the quarantine uid has lost the race with some failure, do nothing } case (Some(Quarantined(uid, _)), Some(quarantineUid)) if uid == quarantineUid => // the UID to be quarantined already exists, do nothing case (_, Some(quarantineUid)) => @@ -898,7 +898,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) // Driver val driver = extendedSystem.dynamicAccess .createInstanceFor[Transport](fqn, args) - .recover({ + .recover { case exception => throw new IllegalArgumentException( @@ -907,7 +907,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) "[akka.actor.ExtendedActorSystem] and [com.typesafe.config.Config] parameters", exception) - }) + } .get // Iteratively decorates the bottom level driver with a list of adapters. 
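Several hunks in this patch drop redundant parentheses around partial-function literals: `.recover({ ... })` becomes `.recover { ... }` (EndpointManager, FailureDetectorLoader) and `context.become({ ... })` becomes `context.become { ... }` (PersistentActorSpec). In Scala both spellings denote the same method call with the same PartialFunction argument, so such rewrites are purely cosmetic. A minimal self-contained sketch of the equivalence (the Try values and the "boom" message are invented for this illustration, not taken from the patch):

import scala.util.{Failure, Try}

// Both calls pass the same PartialFunction[Throwable, Int] to Try#recover;
// the brace-only form just omits the redundant argument parentheses.
val withParens: Try[Int] = Failure[Int](new RuntimeException("boom")).recover({ case _ => 42 })
val withBraces: Try[Int] = Failure[Int](new RuntimeException("boom")).recover { case _ => 42 }

assert(withParens == withBraces) // both evaluate to Success(42)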
diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala index 9a014e0cbe..2421936934 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala @@ -323,8 +323,8 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr private val priorityMessageDestinations = WildcardIndex[NotUsed]() - // These destinations are not defined in configuration because it should not - // be possible to abuse the control channel + // These destinations are not defined in configuration because it should not + // be possible to abuse the control channel .insert(Array("system", "remote-watcher"), NotUsed) // these belong to the cluster and should come from there .insert(Array("system", "cluster", "core", "daemon", "heartbeatSender"), NotUsed) @@ -348,17 +348,16 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr capacity = settings.Advanced.OutboundMessageQueueSize * settings.Advanced.OutboundLanes * 3) - private val associationRegistry = new AssociationRegistry( - remoteAddress => - new Association( - this, - materializer, - controlMaterializer, - remoteAddress, - controlSubject, - settings.LargeMessageDestinations, - priorityMessageDestinations, - outboundEnvelopePool)) + private val associationRegistry = new AssociationRegistry(remoteAddress => + new Association( + this, + materializer, + controlMaterializer, + remoteAddress, + controlSubject, + settings.LargeMessageDestinations, + priorityMessageDestinations, + outboundEnvelopePool)) def remoteAddresses: Set[Address] = associationRegistry.allAssociations.map(_.remoteAddress) @@ -636,7 +635,7 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr killSwitch.abort(ShutdownSignal) flightRecorder.transportKillSwitchPulled() for { - _ <- streamsCompleted.recover { case _ => Done } + _ <- streamsCompleted.recover { case _ => Done } _ <- shutdownTransport().recover { case _ => Done } } yield { // no need to explicitly shut down the contained access since its lifecycle is bound to the Decoder diff --git a/akka-remote/src/main/scala/akka/remote/artery/Association.scala b/akka-remote/src/main/scala/akka/remote/artery/Association.scala index 71b95581ce..4ba84570a9 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Association.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Association.scala @@ -151,7 +151,8 @@ private[remote] class Association( override def settings = transport.settings private def advancedSettings = transport.settings.Advanced - private val deathWatchNotificationFlushEnabled = advancedSettings.DeathWatchNotificationFlushTimeout > Duration.Zero && transport.provider.settings.HasCluster + private val deathWatchNotificationFlushEnabled = + advancedSettings.DeathWatchNotificationFlushTimeout > Duration.Zero && transport.provider.settings.HasCluster private val restartCounter = new RestartCounter(advancedSettings.OutboundMaxRestarts, advancedSettings.OutboundRestartTimeout) @@ -818,7 +819,7 @@ private[remote] class Association( .fromGraph(new SendQueue[OutboundEnvelope](sendToDeadLetters)) .via(streamKillSwitch.flow) .viaMat(transport.outboundTestFlow(this))(Keep.both) - .toMat(transport.outbound(this))({ case ((a, b), (c, d)) => (a, b, c, d) }) // "keep all, exploded" + .toMat(transport.outbound(this)) { case ((a, b), (c, d)) => (a, b, c, d) } // "keep all, exploded"
.run()(materializer) queueValue.inject(wrapper.queue) @@ -1055,9 +1056,11 @@ private[remote] class Association( streamKillSwitch: SharedKillSwitch, completed: Future[Done]): Unit = { implicit val ec = materializer.executionContext - updateStreamMatValues(streamId, OutboundStreamMatValues(OptionVal.Some(streamKillSwitch), completed.recover { - case _ => Done - }, stopping = OptionVal.None)) + updateStreamMatValues(streamId, + OutboundStreamMatValues(OptionVal.Some(streamKillSwitch), + completed.recover { + case _ => Done + }, stopping = OptionVal.None)) } @tailrec private def updateStreamMatValues(streamId: Int, values: OutboundStreamMatValues): Unit = { diff --git a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala index 92889113b2..5e8657b747 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala @@ -417,44 +417,49 @@ private[remote] class Decoder( val originUid = headerBuilder.uid val association = inboundContext.association(originUid) - val recipient: OptionVal[InternalActorRef] = try headerBuilder.recipientActorRef(originUid) match { - case OptionVal.Some(ref) => - OptionVal(ref.asInstanceOf[InternalActorRef]) - case OptionVal.None if headerBuilder.recipientActorRefPath.isDefined => - resolveRecipient(headerBuilder.recipientActorRefPath.get) - case _ => - OptionVal.None - } catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) - OptionVal.None - } + val recipient: OptionVal[InternalActorRef] = + try headerBuilder.recipientActorRef(originUid) match { + case OptionVal.Some(ref) => + OptionVal(ref.asInstanceOf[InternalActorRef]) + case OptionVal.None if headerBuilder.recipientActorRefPath.isDefined => + resolveRecipient(headerBuilder.recipientActorRefPath.get) + case _ => + OptionVal.None + } + catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress recipient from originUid [{}]. {}", originUid, e) + OptionVal.None + } - val sender: OptionVal[InternalActorRef] = try headerBuilder.senderActorRef(originUid) match { - case OptionVal.Some(ref) => - OptionVal(ref.asInstanceOf[InternalActorRef]) - case OptionVal.None if headerBuilder.senderActorRefPath.isDefined => - OptionVal(actorRefResolver.resolve(headerBuilder.senderActorRefPath.get)) - case _ => - OptionVal.None - } catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) - OptionVal.None - } + val sender: OptionVal[InternalActorRef] = + try headerBuilder.senderActorRef(originUid) match { + case OptionVal.Some(ref) => + OptionVal(ref.asInstanceOf[InternalActorRef]) + case OptionVal.None if headerBuilder.senderActorRefPath.isDefined => + OptionVal(actorRefResolver.resolve(headerBuilder.senderActorRefPath.get)) + case _ => + OptionVal.None + } + catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) + OptionVal.None + } - val classManifestOpt = try headerBuilder.manifest(originUid) - catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress manifest from originUid [{}].
{}", originUid, e) - OptionVal.None - } + val classManifestOpt = + try headerBuilder.manifest(originUid) + catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress manifest from originUid [{}]. {}", originUid, e) + OptionVal.None + } if ((recipient.isEmpty && headerBuilder.recipientActorRefPath.isEmpty && !headerBuilder.isNoRecipient) || - (sender.isEmpty && headerBuilder.senderActorRefPath.isEmpty && !headerBuilder.isNoSender)) { + (sender.isEmpty && headerBuilder.senderActorRefPath.isEmpty && !headerBuilder.isNoSender)) { log.debug( "Dropping message for unknown recipient/sender. It was probably sent from system [{}] with compression " + "table [{}] built for previous incarnation of the destination system, or it was compressed with a table " + diff --git a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala index 53969a5228..d4c068a3b2 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala @@ -257,16 +257,17 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl } }) else - setHandler(in, new InHandler { - override def onPush(): Unit = { - val env = grab(in) - env.message match { - case HandshakeReq(from, to) => onHandshakeReq(from, to) - case _ => - onMessage(env) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val env = grab(in) + env.message match { + case HandshakeReq(from, to) => onHandshakeReq(from, to) + case _ => + onMessage(env) + } } - } - }) + }) private def onHandshakeReq(from: UniqueAddress, to: Address): Unit = { if (to == inboundContext.localAddress.address) { diff --git a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala index 9295becc57..82564d2815 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala @@ -61,7 +61,7 @@ private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: R message match { case sel: ActorSelectionMessage => if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) || - sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) { + sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) { if (debugLogEnabled) log.debug( LogMarker.Security, diff --git a/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala b/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala index f16421ca07..0dba837f0d 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala @@ -20,6 +20,6 @@ private[remote] class ObjectPool[A <: AnyRef](capacity: Int, create: () => A, cl def release(obj: A): Boolean = { clear(obj) - (!pool.offer(obj)) + !pool.offer(obj) } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala index 2bee5b1cec..7bf847e8e3 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala @@ -167,7 +167,6 @@ abstract class RemoteInstrument { * | ... metadata entry ... 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * }}} - * */ private[remote] final class RemoteInstruments( private val system: ExtendedActorSystem, diff --git a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala index 8fdb7ee648..0041bbae45 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala @@ -184,7 +184,7 @@ import akka.util.PrettyDuration.PrettyPrintableDuration @tailrec private def clearUnacknowledged(ackedSeqNo: Long): Unit = { if (!unacknowledged.isEmpty && - unacknowledged.peek().message.asInstanceOf[SystemMessageEnvelope].seqNo <= ackedSeqNo) { + unacknowledged.peek().message.asInstanceOf[SystemMessageEnvelope].seqNo <= ackedSeqNo) { unacknowledged.removeFirst() if (unacknowledged.isEmpty) cancelTimer(resendInterval) diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala index f38d0efad4..15d445e88a 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala @@ -72,7 +72,7 @@ private[remote] object AeronSink { onPublicationClosed.invoke(()) true } else if (giveUpAfterNanos >= 0 && (n & TimerCheckMask) == 0 && (System - .nanoTime() - startTime) > giveUpAfterNanos) { + .nanoTime() - startTime) > giveUpAfterNanos) { // the task is invoked by the spinning thread, only check nanoTime each 8192th invocation n = 0L onGiveUp.invoke(()) diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala index 917ee35408..8b4181cc8a 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala @@ -50,8 +50,7 @@ private[remote] object AeronSource { class MessageHandler(pool: EnvelopeBufferPool) { def reset(): Unit = messageReceived = null - private[remote] var messageReceived - : EnvelopeBuffer = null // private to avoid scalac warning about exposing EnvelopeBuffer + private[remote] var messageReceived: EnvelopeBuffer = null // private to avoid scalac warning about exposing EnvelopeBuffer val fragmentsHandler = new Fragments(data => messageReceived = data, pool) } diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala index 015d5b594a..9f624a6aef 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala @@ -321,7 +321,7 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro private def aeronSourceSpinningStrategy: Int = if (settings.Advanced.InboundLanes > 1 || // spinning was identified to be the cause of massive slowdowns with multiple lanes, see #21365 - settings.Advanced.Aeron.IdleCpuLevel < 5) 0 // also don't spin for small IdleCpuLevels + settings.Advanced.Aeron.IdleCpuLevel < 5) 0 // also don't spin for small IdleCpuLevels else 50 * settings.Advanced.Aeron.IdleCpuLevel - 240 override protected def bindInboundStreams(): (Int, Int) = { @@ -356,7 +356,7 @@ private[remote] class ArteryAeronUdpTransport(_system: 
ExtendedActorSystem, _pro val (resourceLife, ctrl, completed) = aeronSource(ControlStreamId, envelopeBufferPool, inboundChannel) .via(inboundFlow(settings, NoInboundCompressions)) - .toMat(inboundControlSink)({ case (a, (c, d)) => (a, c, d) }) + .toMat(inboundControlSink) { case (a, (c, d)) => (a, c, d) } .run()(controlMaterializer) attachControlMessageObserver(ctrl) @@ -372,7 +372,7 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro if (inboundLanes == 1) { aeronSource(OrdinaryStreamId, envelopeBufferPool, inboundChannel) .viaMat(inboundFlow(settings, _inboundCompressions))(Keep.both) - .toMat(inboundSink(envelopeBufferPool))({ case ((a, b), c) => (a, b, c) }) + .toMat(inboundSink(envelopeBufferPool)) { case ((a, b), c) => (a, b, c) } .run()(materializer) } else { @@ -391,9 +391,9 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro new FixedSizePartitionHub[InboundEnvelope]( inboundLanePartitioner, inboundLanes, - settings.Advanced.InboundHubBufferSize)))({ + settings.Advanced.InboundHubBufferSize))) { case ((a, b), c) => (a, b, c) - }) + } .run()(materializer) val lane = inboundSink(envelopeBufferPool) @@ -445,9 +445,11 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro aeronSourceLifecycle: AeronSource.AeronLifecycle, completed: Future[Done]): Unit = { implicit val ec = materializer.executionContext - updateStreamMatValues(streamId, InboundStreamMatValues[AeronLifecycle](aeronSourceLifecycle, completed.recover { - case _ => Done - })) + updateStreamMatValues(streamId, + InboundStreamMatValues[AeronLifecycle](aeronSourceLifecycle, + completed.recover { + case _ => Done + })) } override protected def shutdownTransport(): Future[Done] = { diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala index 897a457290..9cfbae1e5c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala @@ -48,7 +48,7 @@ private[akka] object TaskRunner { } else if (elements(i) eq null) elements(i) = e else - tryAdd(i + 1) //recursive + tryAdd(i + 1) // recursive } tryAdd(0) } @@ -61,7 +61,7 @@ private[akka] object TaskRunner { else if (elements(i) == e) elements(i) = null.asInstanceOf[T] else - tryRemove(i + 1) //recursive + tryRemove(i + 1) // recursive } tryRemove(0) } @@ -170,16 +170,17 @@ private[akka] class TaskRunner(system: ExtendedActorSystem, val idleCpuLevel: In val size = elements.length while (i < size) { val task = elements(i) - if (task ne null) try { - if (task()) { - tasks.remove(task) - reset = true + if (task ne null) + try { + if (task()) { + tasks.remove(task) + reset = true + } + } catch { + case NonFatal(e) => + log.error(e, "Task failed") + tasks.remove(task) } - } catch { - case NonFatal(e) => - log.error(e, "Task failed") - tasks.remove(task) - } i += 1 } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala index 5e7426685b..a51aebe5cd 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala @@ -55,7 +55,7 @@ private[remote] final class CompressionTable[T]( val mit = _dictionary.entrySet().iterator while (i < tups.length) { val entry = mit.next() - tups(i) = 
(entry.getKey -> entry.getValue.intValue()) + tups(i) = entry.getKey -> entry.getValue.intValue() i += 1 } util.Arrays.sort(tups, CompressionTable.compareBy2ndValue[T]) diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala index d2f8e141f8..26a5cc5f5b 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala @@ -27,7 +27,7 @@ private[remote] final case class DecompressionTable[T](originUid: Long, version: /** Writes complete table as String (heavy operation) */ override def toString = s"DecompressionTable($originUid, $version, " + - s"Map(${table.zipWithIndex.map({ case (t, i) => s"$i -> $t" }).mkString(",")}))" + s"Map(${table.zipWithIndex.map { case (t, i) => s"$i -> $t" }.mkString(",")}))" } /** INTERNAL API */ diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala index abb0acb4cb..57d175fe8b 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala @@ -299,7 +299,7 @@ private[remote] object InboundCompression { s"[compress] Found table [version: ${version}], was [OLD][${t}], old tables: [${oldTables.map(_.version)}]") case OptionVal.None => println(s"[compress] Did not find table [version: ${version}], old tables: [${oldTables - .map(_.version)}], activeTable: ${activeTable}, nextTable: ${nextTable}") + .map(_.version)}], activeTable: ${activeTable}, nextTable: ${nextTable}") } } found diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala index 2b9ecbb831..03703fe86f 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala @@ -326,14 +326,15 @@ private[remote] class ArteryTcpTransport( // overhead. 
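The hunk below routes each inbound envelope to the control, ordinary-message or large-message stream by inspecting its `streamId`. A minimal standalone `Partition` sketch of the same routing shape, using illustrative integer lanes rather than Artery's actual stream ids:

import akka.actor.ActorSystem
import akka.stream.ClosedShape
import akka.stream.scaladsl.{ GraphDSL, Partition, RunnableGraph, Sink, Source }

object PartitionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")

  // Partition picks one of its output ports per element; here the
  // partitioner is a plain modulo, standing in for the streamId match.
  val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val partition = b.add(Partition[Int](3, n => n % 3))
    Source(1 to 9) ~> partition.in
    partition.out(0) ~> Sink.foreach[Int](n => println(s"control:  $n"))
    partition.out(1) ~> Sink.foreach[Int](n => println(s"ordinary: $n"))
    partition.out(2) ~> Sink.foreach[Int](n => println(s"large:    $n"))
    ClosedShape
  })
  graph.run()
}

An out-of-range partitioner result fails the stage, so throwing `IllegalArgumentException` for an unexpected `streamId`, as the real code does, fails the stream with a clearer error.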
val inboundStream = Sink.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ - val partition = b.add(Partition[EnvelopeBuffer](3, env => { - env.streamId match { - case OrdinaryStreamId => 1 - case ControlStreamId => 0 - case LargeStreamId => 2 - case other => throw new IllegalArgumentException(s"Unexpected streamId [$other]") - } - })) + val partition = b.add(Partition[EnvelopeBuffer](3, + env => { + env.streamId match { + case OrdinaryStreamId => 1 + case ControlStreamId => 0 + case LargeStreamId => 2 + case other => throw new IllegalArgumentException(s"Unexpected streamId [$other]") + } + })) partition.out(0) ~> controlStream partition.out(1) ~> ordinaryMessagesStream partition.out(2) ~> largeMessagesStream @@ -365,10 +366,11 @@ private[remote] class ArteryTcpTransport( inboundKillSwitch = KillSwitches.shared("inboundKillSwitch") val allStopped: Future[Done] = for { - _ <- controlStreamCompleted.recover { case _ => Done } + _ <- controlStreamCompleted.recover { case _ => Done } _ <- ordinaryMessagesStreamCompleted.recover { case _ => Done } _ <- if (largeMessageChannelEnabled) - largeMessagesStreamCompleted.recover { case _ => Done } else Future.successful(Done) + largeMessagesStreamCompleted.recover { case _ => Done } + else Future.successful(Done) } yield Done allStopped.foreach(_ => runInboundStreams(port, bindPort)) } @@ -385,7 +387,7 @@ private[remote] class ArteryTcpTransport( .addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) .via(inboundKillSwitch.flow) .via(inboundFlow(settings, NoInboundCompressions)) - .toMat(inboundControlSink)({ case (a, (c, d)) => (a, c, d) }) + .toMat(inboundControlSink) { case (a, (c, d)) => (a, c, d) } .run()(controlMaterializer) attachControlMessageObserver(ctrl) updateStreamMatValues(completed) @@ -403,7 +405,7 @@ private[remote] class ArteryTcpTransport( .addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) .via(inboundKillSwitch.flow) .viaMat(inboundFlow(settings, _inboundCompressions))(Keep.both) - .toMat(inboundSink(envelopeBufferPool))({ case ((a, b), c) => (a, b, c) }) + .toMat(inboundSink(envelopeBufferPool)) { case ((a, b), c) => (a, b, c) } .run()(materializer) } else { @@ -426,9 +428,9 @@ private[remote] class ArteryTcpTransport( new FixedSizePartitionHub[InboundEnvelope]( inboundLanePartitioner, inboundLanes, - settings.Advanced.InboundHubBufferSize)))({ + settings.Advanced.InboundHubBufferSize))) { case ((a, b), c) => (a, b, c) - }) + } .run()(materializer) val lane = inboundSink(envelopeBufferPool) diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala index d04236883e..f9d216b166 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala @@ -50,10 +50,10 @@ import akka.util.ByteString def encodeFrameHeader(frameLength: Int): ByteString = ByteString.fromArrayUnsafe( Array[Byte]( - (frameLength & 0xff).toByte, - ((frameLength & 0xff00) >> 8).toByte, - ((frameLength & 0xff0000) >> 16).toByte, - ((frameLength & 0xff000000) >> 24).toByte)) + (frameLength & 0xFF).toByte, + ((frameLength & 0xFF00) >> 8).toByte, + ((frameLength & 0xFF0000) >> 16).toByte, + ((frameLength & 0xFF000000) >> 24).toByte)) } /** diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala index c3d2dc609d..8bdd545557 100644 --- 
a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala @@ -340,7 +340,7 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW ActorIdentityManifest -> deserializeActorIdentity, StatusSuccessManifest -> deserializeStatusSuccess, StatusFailureManifest -> deserializeStatusFailure, - StatusReplyAckManifest -> ((_) => StatusReply.Ack), + StatusReplyAckManifest -> (_ => StatusReply.Ack), StatusReplySuccessManifest -> deserializeStatusReplySuccess, StatusReplyErrorMessageManifest -> deserializeStatusReplyErrorMessage, StatusReplyErrorExceptionManifest -> deserializeStatusReplyErrorException, @@ -348,17 +348,17 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW ActorRefManifest -> deserializeActorRefBytes, OptionManifest -> deserializeOption, OptionalManifest -> deserializeOptional, - PoisonPillManifest -> ((_) => PoisonPill), - KillManifest -> ((_) => Kill), - RemoteWatcherHBManifest -> ((_) => RemoteWatcher.Heartbeat), - DoneManifest -> ((_) => Done), - NotUsedManifest -> ((_) => NotUsed), + PoisonPillManifest -> (_ => PoisonPill), + KillManifest -> (_ => Kill), + RemoteWatcherHBManifest -> (_ => RemoteWatcher.Heartbeat), + DoneManifest -> (_ => Done), + NotUsedManifest -> (_ => NotUsed), AddressManifest -> deserializeAddressData, UniqueAddressManifest -> deserializeUniqueAddress, RemoteWatcherHBRespManifest -> deserializeHeartbeatRsp, ActorInitializationExceptionManifest -> deserializeActorInitializationException, ThrowableNotSerializableExceptionManifest -> deserializeThrowableNotSerializableException, - LocalScopeManifest -> ((_) => LocalScope), + LocalScopeManifest -> (_ => LocalScope), RemoteScopeManifest -> deserializeRemoteScope, ConfigManifest -> deserializeConfig, FromConfigManifest -> deserializeFromConfig, diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala index 23a3ee5cda..53d65ae1aa 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala @@ -73,8 +73,8 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer if (method eq null) clazz.getDeclaredMethod("parseFrom", ProtobufSerializer.ARRAY_OF_BYTE_ARRAY: _*) else method if (parsingMethodBindingRef.compareAndSet( - parsingMethodBinding, - parsingMethodBinding.updated(clazz, unCachedParsingMethod))) + parsingMethodBinding, + parsingMethodBinding.updated(clazz, unCachedParsingMethod))) unCachedParsingMethod else parsingMethod(unCachedParsingMethod) @@ -99,8 +99,8 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer if (method eq null) clazz.getMethod("toByteArray") else method if (toByteArrayMethodBindingRef.compareAndSet( - toByteArrayMethodBinding, - toByteArrayMethodBinding.updated(clazz, unCachedtoByteArrayMethod))) + toByteArrayMethodBinding, + toByteArrayMethodBinding.updated(clazz, unCachedtoByteArrayMethod))) unCachedtoByteArrayMethod else toByteArrayMethod(unCachedtoByteArrayMethod) diff --git a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala index 8985bde929..a66afb2604 100644 --- 
a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala @@ -34,9 +34,9 @@ class TransportAdapters(system: ExtendedActorSystem) extends Extension { private val adaptersTable: Map[String, TransportAdapterProvider] = for ((name, fqn) <- settings.Adapters) yield { name -> system.dynamicAccess .createInstanceFor[TransportAdapterProvider](fqn, immutable.Seq.empty) - .recover({ + .recover { case e => throw new IllegalArgumentException(s"Cannot instantiate transport adapter [${fqn}]", e) - }) + } .get } diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala index e527bd2bf0..c76486077d 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala @@ -264,7 +264,7 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec { .newBuilder() .setInstruction(controlMessageBuilder.build) .build - .toByteArray) //Reuse Byte Array (naughty!) + .toByteArray) // Reuse Byte Array (naughty!) } private def serializeActorRef(defaultAddress: Address, ref: ActorRef): ActorRefData = { diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala index 3543ab7452..3f6436ae80 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala @@ -61,9 +61,9 @@ private[remote] class AkkaProtocolSettings(config: Config) { } @nowarn("msg=deprecated") -private[remote] object AkkaProtocolTransport { //Couldn't these go into the Remoting Extension/ RemoteSettings instead? +private[remote] object AkkaProtocolTransport { // Couldn't these go into the Remoting Extension/ RemoteSettings instead? val AkkaScheme: String = "akka" - val AkkaOverhead: Int = 0 //Don't know yet + val AkkaOverhead: Int = 0 // Don't know yet val UniqueId = new java.util.concurrent.atomic.AtomicInteger(0) final case class AssociateUnderlyingRefuseUid( @@ -514,7 +514,7 @@ private[remote] class ProtocolStateActor( sendDisassociate(wrappedHandle, Unknown) stop( FSM.Failure(TimeoutReason("No response from remote for outbound association. Handshake timed out after " + - s"[${settings.HandshakeTimeout.toMillis} ms]."))) + s"[${settings.HandshakeTimeout.toMillis} ms]."))) case Event(HandshakeTimer, InboundUnassociated(_, wrappedHandle)) => if (log.isDebugEnabled) @@ -526,7 +526,7 @@ private[remote] class ProtocolStateActor( sendDisassociate(wrappedHandle, Unknown) stop( FSM.Failure(TimeoutReason("No response from remote for inbound association. Handshake timed out after " + - s"[${settings.HandshakeTimeout.toMillis} ms]."))) + s"[${settings.HandshakeTimeout.toMillis} ms]."))) } @@ -608,7 +608,7 @@ private[remote] class ProtocolStateActor( sendDisassociate(wrappedHandle, Unknown) stop( FSM.Failure(TimeoutReason(s"No response from remote. " + - s"Transport failure detector triggered. (internal state was $stateName)"))) + s"Transport failure detector triggered. 
(internal state was $stateName)"))) } } diff --git a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala index 210f96a058..7c6a716e45 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala @@ -106,9 +106,8 @@ private[remote] class FailureInjectorTransportAdapter( protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit = { // Association is simulated to be failed if there was either an inbound or outbound message drop if (shouldDropInbound(remoteAddress, (), "interceptAssociate") || shouldDropOutbound( - remoteAddress, - (), - "interceptAssociate")) + remoteAddress, (), + "interceptAssociate")) statusPromise.failure(new FailureInjectorException("Simulated failure of association to " + remoteAddress)) else statusPromise.completeWith(wrappedTransport.associate(remoteAddress).map { handle => @@ -118,7 +117,7 @@ private[remote] class FailureInjectorTransportAdapter( } def notify(ev: AssociationEvent): Unit = ev match { - case InboundAssociation(handle) if shouldDropInbound(handle.remoteAddress, ev, "notify") => //Ignore + case InboundAssociation(handle) if shouldDropInbound(handle.remoteAddress, ev, "notify") => // Ignore case _ => upstreamListener match { case Some(listener) => listener.notify(interceptInboundAssociation(ev)) diff --git a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala index 3c9d3a84d7..a7e22fc2c3 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala @@ -101,22 +101,22 @@ class TestTransport( * The [[akka.remote.transport.TestTransport.SwitchableLoggedBehavior]] for the listen() method. */ val listenBehavior = new SwitchableLoggedBehavior[Unit, (Address, Promise[AssociationEventListener])]( - (_) => defaultListen, - (_) => registry.logActivity(ListenAttempt(localAddress))) + _ => defaultListen, + _ => registry.logActivity(ListenAttempt(localAddress))) /** * The [[akka.remote.transport.TestTransport.SwitchableLoggedBehavior]] for the associate() method. */ val associateBehavior = new SwitchableLoggedBehavior[Address, AssociationHandle]( defaultAssociate _, - (remoteAddress) => registry.logActivity(AssociateAttempt(localAddress, remoteAddress))) + remoteAddress => registry.logActivity(AssociateAttempt(localAddress, remoteAddress))) /** * The [[akka.remote.transport.TestTransport.SwitchableLoggedBehavior]] for the shutdown() method. */ val shutdownBehavior = new SwitchableLoggedBehavior[Unit, Boolean]( - (_) => defaultShutdown, - (_) => registry.logActivity(ShutdownAttempt(localAddress))) + _ => defaultShutdown, + _ => registry.logActivity(ShutdownAttempt(localAddress))) override def listen: Future[(Address, Promise[AssociationEventListener])] = listenBehavior(()) // Need to do like this for binary compatibility reasons @@ -147,22 +147,25 @@ class TestTransport( * altering the behavior via pushDelayed will turn write to a blocking operation -- use of pushDelayed therefore * is not recommended. 
*/ - val writeBehavior = new SwitchableLoggedBehavior[(TestAssociationHandle, ByteString), Boolean](defaultBehavior = { - defaultWrite _ - }, logCallback = { - case (handle, payload) => - registry.logActivity(WriteAttempt(handle.localAddress, handle.remoteAddress, payload)) - }) + val writeBehavior = new SwitchableLoggedBehavior[(TestAssociationHandle, ByteString), Boolean]( + defaultBehavior = { + defaultWrite _ + }, + logCallback = { + case (handle, payload) => + registry.logActivity(WriteAttempt(handle.localAddress, handle.remoteAddress, payload)) + }) /** * The [[akka.remote.transport.TestTransport.SwitchableLoggedBehavior]] for the disassociate() method on handles. All * handle calls pass through this call. */ val disassociateBehavior = new SwitchableLoggedBehavior[TestAssociationHandle, Unit](defaultBehavior = { - defaultDisassociate _ - }, logCallback = { (handle) => - registry.logActivity(DisassociateAttempt(handle.localAddress, handle.remoteAddress)) - }) + defaultDisassociate _ + }, + logCallback = { handle => + registry.logActivity(DisassociateAttempt(handle.localAddress, handle.remoteAddress)) + }) private[akka] def write(handle: TestAssociationHandle, payload: ByteString): Boolean = Await.result(writeBehavior((handle, payload)), 3.seconds) diff --git a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala index 5bc170a67e..5bdcf044bc 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala @@ -115,9 +115,9 @@ object ThrottlerTransportAdapter { extends ThrottleMode { private def isAvailable(nanoTimeOfSend: Long, tokens: Int): Boolean = - if ((tokens > capacity && availableTokens > 0)) { + if (tokens > capacity && availableTokens > 0) { true // Allow messages larger than capacity through, it will be recorded as negative tokens - } else min((availableTokens + tokensGenerated(nanoTimeOfSend)), capacity) >= tokens + } else min(availableTokens + tokensGenerated(nanoTimeOfSend), capacity) >= tokens override def tryConsumeTokens(nanoTimeOfSend: Long, tokens: Int): (ThrottleMode, Boolean) = { if (isAvailable(nanoTimeOfSend, tokens)) @@ -375,14 +375,14 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) internalTarget.sendSystemMessage(Watch(internalTarget, ref)) target.tell(mode, ref) ref.result.future.transform({ - case Terminated(t) if t.path == target.path => - SetThrottleAck - case SetThrottleAck => - internalTarget.sendSystemMessage(Unwatch(target, ref)) - SetThrottleAck - case _ => - throw new IllegalArgumentException() // won't happen, compiler exhaustiveness check pleaser - }, t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ExecutionContexts.parasitic) + case Terminated(t) if t.path == target.path => + SetThrottleAck + case SetThrottleAck => + internalTarget.sendSystemMessage(Unwatch(target, ref)) + SetThrottleAck + case _ => + throw new IllegalArgumentException() // won't happen, compiler exhaustiveness check pleaser + }, t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ExecutionContexts.parasitic) } } @@ -490,14 +490,15 @@ private[transport] class ThrottledAssociation( case Event(mode: ThrottleMode, ExposedHandle(exposedHandle)) => inboundThrottleMode = mode try if (mode == Blackhole) { - throttledMessages = Queue.empty[ByteString] - exposedHandle.disassociate("the association was 
blackholed", log) - stop() - } else { - associationHandler.notify(InboundAssociation(exposedHandle)) - exposedHandle.readHandlerPromise.future.map(Listener(_)).pipeTo(self) - goto(WaitUpstreamListener) - } finally sender() ! SetThrottleAck + throttledMessages = Queue.empty[ByteString] + exposedHandle.disassociate("the association was blackholed", log) + stop() + } else { + associationHandler.notify(InboundAssociation(exposedHandle)) + exposedHandle.readHandlerPromise.future.map(Listener(_)).pipeTo(self) + goto(WaitUpstreamListener) + } + finally sender() ! SetThrottleAck } when(WaitUpstreamListener) { diff --git a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala index af0f43fc25..c3f3bfbd22 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala @@ -271,7 +271,6 @@ trait AssociationHandle { * some transports may not support it (hardware connections). Remote endpoint of the channel or connection MAY * be notified, but this is not guaranteed. The Transport that provides the handle MUST guarantee that disassociate() * could be called arbitrarily many times. - * */ @deprecated( message = "Use method that states reasons to make sure disassociation reasons are logged.", diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala index cb5b241c6c..07dba21d2e 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala @@ -488,7 +488,7 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA bootstrap } - override def isResponsibleFor(address: Address): Boolean = true //TODO: Add configurable subnet filtering + override def isResponsibleFor(address: Address): Boolean = true // TODO: Add configurable subnet filtering // TODO: This should be factored out to an async (or thread-isolated) name lookup service #2960 def addressToSocketAddress(addr: Address): Future[InetSocketAddress] = addr match { @@ -527,7 +527,8 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA Some(settings.Hostname), port) match { case Some(address) => - addressFromSocketAddress(newServerChannel.getLocalAddress, schemeIdentifier, system.name, None, None) match { + addressFromSocketAddress(newServerChannel.getLocalAddress, schemeIdentifier, system.name, None, + None) match { case Some(address) => boundTo = address case None => throw new NettyTransportException( diff --git a/akka-remote/src/test/scala-jdk9-only/akka/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala b/akka-remote/src/test/scala-jdk9-only/akka/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala index 6731ffeebd..2ec662c8fb 100644 --- a/akka-remote/src/test/scala-jdk9-only/akka/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala +++ b/akka-remote/src/test/scala-jdk9-only/akka/remote/artery/jfr/JFRRemotingFlightRecorderSpec.scala @@ -32,7 +32,7 @@ class JFRRemotingFlightRecorderSpec extends AkkaSpec { """)) try { val extension = RemotingFlightRecorder(system) - extension should === (NoOpRemotingFlightRecorder) + extension should ===(NoOpRemotingFlightRecorder) } finally { TestKit.shutdownActorSystem(system) } diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala 
b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala index d78c077eeb..bd4b9438b1 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala @@ -45,19 +45,19 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { "use good enough cumulative distribution function" in { val fd = createFailureDetector() - cdf(fd.phi(0, 0, 10)) should ===(0.5 +- (0.001)) - cdf(fd.phi(6L, 0, 10)) should ===(0.7257 +- (0.001)) - cdf(fd.phi(15L, 0, 10)) should ===(0.9332 +- (0.001)) - cdf(fd.phi(20L, 0, 10)) should ===(0.97725 +- (0.001)) - cdf(fd.phi(25L, 0, 10)) should ===(0.99379 +- (0.001)) - cdf(fd.phi(35L, 0, 10)) should ===(0.99977 +- (0.001)) - cdf(fd.phi(40L, 0, 10)) should ===(0.99997 +- (0.0001)) + cdf(fd.phi(0, 0, 10)) should ===(0.5 +- 0.001) + cdf(fd.phi(6L, 0, 10)) should ===(0.7257 +- 0.001) + cdf(fd.phi(15L, 0, 10)) should ===(0.9332 +- 0.001) + cdf(fd.phi(20L, 0, 10)) should ===(0.97725 +- 0.001) + cdf(fd.phi(25L, 0, 10)) should ===(0.99379 +- 0.001) + cdf(fd.phi(35L, 0, 10)) should ===(0.99977 +- 0.001) + cdf(fd.phi(40L, 0, 10)) should ===(0.99997 +- 0.0001) for (x :: y :: Nil <- (0 to 40).toList.sliding(2)) { fd.phi(x, 0, 10) should be < (fd.phi(y, 0, 10)) } - cdf(fd.phi(22, 20.0, 3)) should ===(0.7475 +- (0.001)) + cdf(fd.phi(22, 20.0, 3)) should ===(0.7475 +- 0.001) } "handle outliers without losing precision or hitting exceptions" in { @@ -70,7 +70,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetector() val test = TreeMap(0 -> 0.0, 500 -> 0.1, 1000 -> 0.3, 1200 -> 1.6, 1400 -> 4.7, 1600 -> 10.8, 1700 -> 15.3) for ((timeDiff, expectedPhi) <- test) { - fd.phi(timeDiff = timeDiff, mean = 1000.0, stdDeviation = 100.0) should ===(expectedPhi +- (0.1)) + fd.phi(timeDiff = timeDiff, mean = 1000.0, stdDeviation = 100.0) should ===(expectedPhi +- 0.1) } // larger stdDeviation results => lower phi @@ -91,7 +91,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { fd.heartbeat() fd.phi should ===(0.3 +- 0.2) fd.phi should ===(4.5 +- 0.3) - fd.phi should be > (15.0) + fd.phi should be > 15.0 } "return phi value using first interval after second heartbeat" in { @@ -99,9 +99,9 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetector(clock = fakeTimeGenerator(timeInterval)) fd.heartbeat() - fd.phi should be > (0.0) + fd.phi should be > 0.0 fd.heartbeat() - fd.phi should be > (0.0) + fd.phi should be > 0.0 } "mark node as monitored after a series of successful heartbeats" in { @@ -121,18 +121,19 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val timeInterval = List[Long](0, 1000, 100, 100, 7000) val fd = createFailureDetector(threshold = 3, clock = fakeTimeGenerator(timeInterval)) - fd.heartbeat() //0 - fd.heartbeat() //1000 - fd.heartbeat() //1100 + fd.heartbeat() // 0 + fd.heartbeat() // 1000 + fd.heartbeat() // 1100 - fd.isAvailable should ===(true) //1200 - fd.isAvailable should ===(false) //8200 + fd.isAvailable should ===(true) // 1200 + fd.isAvailable should ===(false) // 8200 } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { // 1000 regular intervals, 5 minute pause, and then a short pause again that should trigger unreachable again val regularIntervals = 0L +: Vector.fill(999)(1000L) - val timeIntervals = regularIntervals 
:+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L + val timeIntervals = + regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L val fd = createFailureDetector( threshold = 8, acceptableLostDuration = 3.seconds, @@ -183,18 +184,18 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetector(maxSampleSize = 3, clock = fakeTimeGenerator(timeInterval)) // 100 ms interval - fd.heartbeat() //0 - fd.heartbeat() //100 - fd.heartbeat() //200 - fd.heartbeat() //300 - val phi1 = fd.phi //400 + fd.heartbeat() // 0 + fd.heartbeat() // 100 + fd.heartbeat() // 200 + fd.heartbeat() // 300 + val phi1 = fd.phi // 400 // 500 ms interval, should become same phi when 100 ms intervals have been dropped - fd.heartbeat() //1000 - fd.heartbeat() //1500 - fd.heartbeat() //2000 - fd.heartbeat() //2500 - val phi2 = fd.phi //3000 - phi2 should ===(phi1 +- (0.001)) + fd.heartbeat() // 1000 + fd.heartbeat() // 1500 + fd.heartbeat() // 2000 + fd.heartbeat() // 2500 + val phi2 = fd.phi // 3000 + phi2 should ===(phi1 +- 0.001) } } diff --git a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala index ef827fd1da..5d46029103 100644 --- a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala @@ -44,10 +44,10 @@ class DaemonicSpec extends AkkaSpec { // get new non daemonic threads running awaitAssert({ - val newNonDaemons: Set[Thread] = - Thread.getAllStackTraces.keySet().asScala.filter(t => !origThreads(t) && !t.isDaemon).to(Set) - newNonDaemons should ===(Set.empty[Thread]) - }, 4.seconds) + val newNonDaemons: Set[Thread] = + Thread.getAllStackTraces.keySet().asScala.filter(t => !origThreads(t) && !t.isDaemon).to(Set) + newNonDaemons should ===(Set.empty[Thread]) + }, 4.seconds) } finally { shutdown(daemonicSystem) diff --git a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala index ef53a2b61d..7caf3ee1e3 100644 --- a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala @@ -43,18 +43,19 @@ class DeadlineFailureDetectorSpec extends AkkaSpec { val timeInterval = List[Long](0, 1000, 100, 100, 7000) val fd = createFailureDetector(acceptableLostDuration = 4.seconds, clock = fakeTimeGenerator(timeInterval)) - fd.heartbeat() //0 - fd.heartbeat() //1000 - fd.heartbeat() //1100 + fd.heartbeat() // 0 + fd.heartbeat() // 1000 + fd.heartbeat() // 1100 - fd.isAvailable should ===(true) //1200 - fd.isAvailable should ===(false) //8200 + fd.isAvailable should ===(true) // 1200 + fd.isAvailable should ===(false) // 8200 } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { // 1000 regular intervals, 5 minute pause, and then a short pause again that should trigger unreachable again val regularIntervals = 0L +: Vector.fill(999)(1000L) - val timeIntervals = regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L + val timeIntervals = + regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L val fd = createFailureDetector(acceptableLostDuration = 4.seconds, clock = fakeTimeGenerator(timeIntervals)) for (_ <- 0 until 1000) fd.heartbeat() diff 
--git a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala index 5fab3e139a..e4029b14bd 100644 --- a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala @@ -42,15 +42,14 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { acceptableLostDuration: FiniteDuration = Duration.Zero, firstHeartbeatEstimate: FiniteDuration = 1.second, clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = { - new DefaultFailureDetectorRegistry[String]( - () => - createFailureDetector( - threshold, - maxSampleSize, - minStdDeviation, - acceptableLostDuration, - firstHeartbeatEstimate, - clock)) + new DefaultFailureDetectorRegistry[String](() => + createFailureDetector( + threshold, + maxSampleSize, + minStdDeviation, + acceptableLostDuration, + firstHeartbeatEstimate, + clock)) } "mark node as available after a series of successful heartbeats" in { @@ -68,13 +67,13 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { val timeInterval = List[Long](0, 1000, 100, 100, 4000, 3000) val fd = createFailureDetectorRegistry(threshold = 3, clock = fakeTimeGenerator(timeInterval)) - fd.heartbeat("resource1") //0 - fd.heartbeat("resource1") //1000 - fd.heartbeat("resource1") //1100 + fd.heartbeat("resource1") // 0 + fd.heartbeat("resource1") // 1000 + fd.heartbeat("resource1") // 1100 - fd.isAvailable("resource1") should ===(true) //1200 - fd.heartbeat("resource2") //5200, but unrelated resource - fd.isAvailable("resource1") should ===(false) //8200 + fd.isAvailable("resource1") should ===(true) // 1200 + fd.heartbeat("resource2") // 5200, but unrelated resource + fd.isAvailable("resource1") should ===(false) // 8200 } "accept some configured missing heartbeats" in { @@ -123,25 +122,25 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetectorRegistry(clock = fakeTimeGenerator(timeInterval)) fd.isMonitoring("resource1") should ===(false) - fd.heartbeat("resource1") //0 + fd.heartbeat("resource1") // 0 - fd.heartbeat("resource1") //1000 - fd.heartbeat("resource1") //1100 + fd.heartbeat("resource1") // 1000 + fd.heartbeat("resource1") // 1100 - fd.isAvailable("resource1") should ===(true) //2200 + fd.isAvailable("resource1") should ===(true) // 2200 fd.isMonitoring("resource1") should ===(true) fd.remove("resource1") fd.isMonitoring("resource1") should ===(false) - fd.isAvailable("resource1") should ===(true) //3300 + fd.isAvailable("resource1") should ===(true) // 3300 // it receives heartbeat from an explicitly removed node - fd.heartbeat("resource1") //4400 - fd.heartbeat("resource1") //5500 - fd.heartbeat("resource1") //6600 + fd.heartbeat("resource1") // 4400 + fd.heartbeat("resource1") // 5500 + fd.heartbeat("resource1") // 6600 - fd.isAvailable("resource1") should ===(true) //6700 + fd.isAvailable("resource1") should ===(true) // 6700 fd.isMonitoring("resource1") should ===(true) } diff --git a/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala b/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala index 28263f533f..af1adcab48 100644 --- a/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala @@ -34,11 +34,11 @@ class LogSourceSpec extends AkkaSpec(""" val reporter = system.actorOf(Props[Reporter](), "reporter") val 
logProbe = TestProbe() system.eventStream.subscribe(system.actorOf(Props(new Actor { - def receive = { - case i @ Info(_, _, msg: String) if msg contains "hello" => logProbe.ref ! i - case _ => - } - }).withDeploy(Deploy.local), "logSniffer"), classOf[Logging.Info]) + def receive = { + case i @ Info(_, _, msg: String) if msg contains "hello" => logProbe.ref ! i + case _ => + } + }).withDeploy(Deploy.local), "logSniffer"), classOf[Logging.Info]) "Log events" must { diff --git a/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala index 0491e120d4..a42137ebf7 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala @@ -146,7 +146,8 @@ class RemoteFeaturesDisabledSpec extends RemoteFeaturesSpec(RemoteFeaturesSpec.d """)) val masterRef = masterSystem.actorOf(Props[RemoteDeploymentSpec.Echo1](), actorName) - masterRef.path shouldEqual RootActorPath(AddressFromURIString(s"akka://${masterSystem.name}")) / "user" / actorName + masterRef.path shouldEqual RootActorPath( + AddressFromURIString(s"akka://${masterSystem.name}")) / "user" / actorName masterRef.path.address.hasLocalScope shouldBe true masterSystem.actorSelection(RootActorPath(address(system)) / "user" / actorName) ! Identify(1) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index 968e7edbc4..c7d2741719 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -76,7 +76,7 @@ object Configuration { assert(ports.size == 2) val (localPort, remotePort) = (ports(0), ports(1)) try { - //if (true) throw new IllegalArgumentException("Ticket1978*Spec isn't enabled") + // if (true) throw new IllegalArgumentException("Ticket1978*Spec isn't enabled") val config = ConfigFactory.parseString(conf.format(localPort, trustStore, keyStore, cipher, enabled.mkString(", "))) @@ -145,11 +145,11 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) def preCondition: Boolean = true - ("-") must { + "-" must { if (cipherConfig.runTest && preCondition) { other.actorOf(Props(new Actor { - def receive = { case ("ping", x) => sender() ! ((("pong", x), sender())) } - }), "echo") + def receive = { case ("ping", x) => sender() ! 
((("pong", x), sender())) } + }), "echo") val otherAddress = other.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport.defaultAddress diff --git a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala index 65bdbba80c..36b2e20ce0 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala @@ -57,7 +57,7 @@ class DuplicateHandshakeSpec extends AkkaSpec(""" env } .via(new DuplicateHandshakeReq(numberOfLanes = 3, inboundContext, system.asInstanceOf[ExtendedActorSystem], pool)) - .map { case env: InboundEnvelope => (env.message -> env.lane) } + .map { case env: InboundEnvelope => env.message -> env.lane } .toMat(TestSink.probe[Any])(Keep.both) .run() } diff --git a/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala index c16a4e2885..53ec8fadfe 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala @@ -22,23 +22,23 @@ class FlushOnShutdownSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defaultC val probeRef = probe.ref localSystem.actorOf(Props(new Actor { - def receive = { - case msg => probeRef ! msg - } - }), "receiver") + def receive = { + case msg => probeRef ! msg + } + }), "receiver") val actorOnSystemB = remoteSystem.actorOf(Props(new Actor { - def receive = { - case "start" => - context.actorSelection(rootActorPath(localSystem) / "user" / "receiver") ! Identify(None) + def receive = { + case "start" => + context.actorSelection(rootActorPath(localSystem) / "user" / "receiver") ! Identify(None) - case ActorIdentity(_, Some(receiverRef)) => - receiverRef ! "msg1" - receiverRef ! "msg2" - receiverRef ! "msg3" - context.system.terminate() - } - }), "sender") + case ActorIdentity(_, Some(receiverRef)) => + receiverRef ! "msg1" + receiverRef ! "msg2" + receiverRef ! "msg3" + context.system.terminate() + } + }), "sender") actorOnSystemB ! "start" diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala index 34d53f64f6..13aedd82bf 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala @@ -86,14 +86,14 @@ class RemoteDeathWatchSpec val path = RootActorPath(Address("akka", system.name, "unknownhost", 2552)) / "user" / "subject" system.actorOf(Props(new Actor { - @nowarn - val watchee = RARP(context.system).provider.resolveActorRef(path) - context.watch(watchee) + @nowarn + val watchee = RARP(context.system).provider.resolveActorRef(path) + context.watch(watchee) - def receive = { - case t: Terminated => testActor ! t.actor.path - } - }).withDeploy(Deploy.local), name = "observer2") + def receive = { + case t: Terminated => testActor ! 
t.actor.path + } + }).withDeploy(Deploy.local), name = "observer2") expectMsg(60.seconds, path) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala index 840358c168..569e4df24d 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala @@ -153,17 +153,17 @@ class RemoteDeploymentSpec val probes = Vector.fill(numParents, numChildren)(TestProbe()(masterSystem)) val childProps = Props[Echo1]() - for (p <- (0 until numParents); c <- (0 until numChildren)) { + for (p <- 0 until numParents; c <- 0 until numChildren) { parents(p).tell((childProps, numMessages), probes(p)(c).ref) } - for (p <- (0 until numParents); c <- (0 until numChildren)) { + for (p <- 0 until numParents; c <- 0 until numChildren) { val probe = probes(p)(c) probe.expectMsgType[ActorRef] // the child } val expectedMessages = (0 until numMessages).toVector - for (p <- (0 until numParents); c <- (0 until numChildren)) { + for (p <- 0 until numParents; c <- 0 until numChildren) { val probe = probes(p)(c) probe.receiveN(numMessages) should equal(expectedMessages) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala index a219bf2e60..6cffc2bf25 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala @@ -80,7 +80,8 @@ class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG } "skip all remote instruments in the message if none are existing" in { - ensureDebugLog("Skipping serialized data in message for RemoteInstrument(s) [1, 10, 31] that has no local match") { + ensureDebugLog( + "Skipping serialized data in message for RemoteInstrument(s) [1, 10, 31] that has no local match") { val p = TestProbe() val instruments = Seq(testInstrument(1, "!"), testInstrument(10, ".."), testInstrument(31, "???")) val riS = remoteInstruments(instruments: _*) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala index bf32bfde66..c98b212c09 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala @@ -88,11 +88,11 @@ class RemoteMessageSerializationSpec extends ArteryMultiNodeSpec with ImplicitSe private def verifySend(msg: Any)(afterSend: => Unit): Unit = { val bigBounceId = s"bigBounce-${ThreadLocalRandom.current.nextInt()}" val bigBounceOther = remoteSystem.actorOf(Props(new Actor { - def receive = { - case x: Int => sender() ! byteStringOfSize(x) - case x => sender() ! x - } - }), bigBounceId) + def receive = { + case x: Int => sender() ! byteStringOfSize(x) + case x => sender() ! 
x + } + }), bigBounceId) @nowarn val bigBounceHere = RARP(system).provider.resolveActorRef(s"akka://${remoteSystem.name}@localhost:$remotePort/user/$bigBounceId") diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala index b1c58b1040..4360083663 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala @@ -88,10 +88,10 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) "be able to identify a remote actor and ping it" in { systemB.actorOf(Props(new Actor { - def receive = { - case "ping" => sender() ! "pong" - } - }), "echo") + def receive = { + case "ping" => sender() ! "pong" + } + }), "echo") val actorPath = rootB / "user" / "echo" val echoSel = system.actorSelection(actorPath) diff --git a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala index 83bc5cabe5..3f5a5334b8 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala @@ -104,12 +104,12 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli val logProbe = TestProbe() // but instead install our own listener system.eventStream.subscribe(system.actorOf(Props(new Actor { - import Logging._ - def receive = { - case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d - case _ => - } - }).withDeploy(Deploy.local), "debugSniffer"), classOf[Logging.Debug]) + import Logging._ + def receive = { + case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d + case _ => + } + }).withDeploy(Deploy.local), "debugSniffer"), classOf[Logging.Debug]) remoteDaemon ! 
"hello" logProbe.expectMsgType[Logging.Debug] diff --git a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala index 55f8aaadc3..e925d49ba7 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala @@ -77,13 +77,13 @@ class CompressionIntegrationSpec awaitAssert { val a1 = aManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [A] received: " + a1) - a1.table.version.toInt should be >= (1) + a1.table.version.toInt should be >= 1 a1.table.dictionary.keySet should contain("TestMessageManifest") } awaitAssert { val a1 = aRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [A] received: " + a1) - a1.table.version.toInt should be >= (1) + a1.table.version.toInt should be >= 1 a1.table.dictionary.keySet should contain(echoRefA) // recipient a1.table.dictionary.keySet should contain(testActor) // sender } @@ -92,13 +92,13 @@ class CompressionIntegrationSpec awaitAssert { val b1 = bManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [B] received: " + b1) - b1.table.version.toInt should be >= (1) + b1.table.version.toInt should be >= 1 b1.table.dictionary.keySet should contain("TestMessageManifest") } awaitAssert { val b1 = bRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [B] received: " + b1) - b1.table.version.toInt should be >= (1) + b1.table.version.toInt should be >= 1 b1.table.dictionary.keySet should contain(echoRefB) } } @@ -110,26 +110,26 @@ class CompressionIntegrationSpec echoRefA.tell(TestMessage("hello2"), ignore.ref) val a2 = aManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [A] received more: " + a2) - a2.table.version.toInt should be >= (3) + a2.table.version.toInt should be >= 3 } awaitAssert { echoRefA.tell(TestMessage("hello2"), ignore.ref) val a2 = aRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [A] received more: " + a2) - a2.table.version.toInt should be >= (3) + a2.table.version.toInt should be >= 3 } awaitAssert { echoRefA.tell(TestMessage("hello3"), ignore.ref) val b2 = bManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [B] received more: " + b2) - b2.table.version.toInt should be >= (3) + b2.table.version.toInt should be >= 3 } awaitAssert { echoRefA.tell(TestMessage("hello3"), ignore.ref) val b2 = bRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [B] received more: " + b2) - b2.table.version.toInt should be >= (3) + b2.table.version.toInt should be >= 3 } } } @@ -299,15 +299,15 @@ class CompressionIntegrationSpec awaitAssert { val a2 = aManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [A] received: " + a2) - a2.table.version.toInt should be >= (1) - a2.table.version.toInt should be < (3) + a2.table.version.toInt should be >= 1 + a2.table.version.toInt should be < 3 a2.table.dictionary.keySet should contain("TestMessageManifest") } awaitAssert { val a2 = aRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [A] received: " + a2) - a2.table.version.toInt should be >= (1) - a2.table.version.toInt 
should be < (3) + a2.table.version.toInt should be >= 1 + a2.table.version.toInt should be < 3 a2.table.dictionary.keySet should contain(echoRefA) // recipient a2.table.dictionary.keySet should contain(testActor) // sender } @@ -316,13 +316,13 @@ class CompressionIntegrationSpec awaitAssert { val b2 = bManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [B2] received: " + b2) - b2.table.version.toInt should be >= (1) + b2.table.version.toInt should be >= 1 b2.table.dictionary.keySet should contain("TestMessageManifest") } awaitAssert { val b2 = bRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [B] received: " + b2) - b2.table.version.toInt should be >= (1) + b2.table.version.toInt should be >= 1 b2.table.dictionary.keySet should contain(echoRefB2) } } @@ -376,13 +376,13 @@ class CompressionIntegrationSpec var currentTable: CompressionTable[ActorRef] = null receivedActorRefCompressionTableProbe.awaitAssert({ - // discard duplicates with awaitAssert until we receive next version - val receivedActorRefCompressionTable = - receivedActorRefCompressionTableProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](10.seconds) + // discard duplicates with awaitAssert until we receive next version + val receivedActorRefCompressionTable = + receivedActorRefCompressionTableProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](10.seconds) - currentTable = receivedActorRefCompressionTable.table - seenTableVersions = currentTable.version :: seenTableVersions - }, max = 10.seconds) + currentTable = receivedActorRefCompressionTable.table + seenTableVersions = currentTable.version :: seenTableVersions + }, max = 10.seconds) // debugging: info("Seen versions: " + seenTableVersions) lastTable = currentTable diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala index c3c668a7ca..f9c5d143b9 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala @@ -12,7 +12,6 @@ import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec /** - * */ class PemManagersProviderSpec extends AnyWordSpec with Matchers { diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala index 64c2565093..caf6d460c5 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala @@ -293,8 +293,8 @@ class RemoteSystem( val sslContextRef = new AtomicReference[SSLContext]() val sslProviderSetup = - SSLEngineProviderSetup( - sys => new ProbedSSLEngineProvider(sys, sslContextRef, sslProviderServerProbe, sslProviderClientProbe)) + SSLEngineProviderSetup(sys => + new ProbedSSLEngineProvider(sys, sslContextRef, sslProviderServerProbe, sslProviderClientProbe)) val actorSystem = newRemoteSystem(Some(configString), Some(name), Some(ActorSystemSetup(sslProviderSetup))) diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala index 4d31ec645d..dfc2be302e 100644 --- 
a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala @@ -16,7 +16,6 @@ import scala.util.control.NonFatal import akka.util.ccompat.JavaConverters._ /** - * */ class TlsResourcesSpec extends AnyWordSpec with Matchers { @@ -34,7 +33,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val sameSan = baseServers + baseClient + baseNode + baseRsaClient sameSan.foreach { prefix => val serverCert = loadCert(s"/ssl/$prefix.example.com.crt") - X509Readers.getAllSubjectNames(serverCert).contains("example.com") mustBe (true) + X509Readers.getAllSubjectNames(serverCert).contains("example.com") mustBe true } } @@ -42,7 +41,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val notExampleSan = arteryNodeSet + baseIslandServer notExampleSan.foreach { prefix => val cert = loadCert(s"/ssl/$prefix.example.com.crt") - X509Readers.getAllSubjectNames(cert).contains("example.com") mustBe (false) + X509Readers.getAllSubjectNames(cert).contains("example.com") mustBe false } } @@ -52,7 +51,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val clients = Set(baseClient, baseNode, baseRsaClient) ++ arteryNodeSet clients.foreach { prefix => val cert = loadCert(s"/ssl/$prefix.example.com.crt") - cert.getExtendedKeyUsage.asScala.contains(clientAuth) mustBe (true) + cert.getExtendedKeyUsage.asScala.contains(clientAuth) mustBe true } } @@ -60,7 +59,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val servers = baseServers + baseIslandServer + baseNode ++ arteryNodeSet servers.foreach { prefix => val serverCert = loadCert(s"/ssl/$prefix.example.com.crt") - serverCert.getExtendedKeyUsage.asScala.contains(serverAuth) mustBe (true) + serverCert.getExtendedKeyUsage.asScala.contains(serverAuth) mustBe true } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala index c02303dcbc..57bd5a0887 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala @@ -8,7 +8,6 @@ import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec /** - * */ class X509ReadersSpec extends AnyWordSpec with Matchers { import TlsResourcesSpec._ diff --git a/akka-remote/src/test/scala/akka/remote/classic/ActorsLeakSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/ActorsLeakSpec.scala index e3e49676cd..96b39896bf 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/ActorsLeakSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/ActorsLeakSpec.scala @@ -85,7 +85,7 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender val initialActors = targets.flatMap(collectLiveActors).toSet - //Clean shutdown case + // Clean shutdown case for (_ <- 1 to 3) { val remoteSystem = @@ -108,7 +108,7 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender // Quarantine an old incarnation case for (_ <- 1 to 3) { - //always use the same address + // always use the same address val remoteSystem = ActorSystem( "remote", diff --git a/akka-remote/src/test/scala/akka/remote/classic/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/RemoteDeathWatchSpec.scala index 43c2464c7e..ba9bd35d4f 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/RemoteDeathWatchSpec.scala 
+++ b/akka-remote/src/test/scala/akka/remote/classic/RemoteDeathWatchSpec.scala @@ -94,14 +94,14 @@ akka.actor.warn-about-java-serializer-usage = off val path = RootActorPath(Address(protocol, system.name, "unknownhost", 2552)) / "user" / "subject" system.actorOf(Props(new Actor { - @nowarn - val watchee = RARP(context.system).provider.resolveActorRef(path) - context.watch(watchee) + @nowarn + val watchee = RARP(context.system).provider.resolveActorRef(path) + context.watch(watchee) - def receive = { - case t: Terminated => testActor ! t.actor.path - } - }).withDeploy(Deploy.local), name = "observer2") + def receive = { + case t: Terminated => testActor ! t.actor.path + } + }).withDeploy(Deploy.local), name = "observer2") expectMsg(60.seconds, path) } @@ -115,7 +115,8 @@ akka.actor.warn-about-java-serializer-usage = off "quarantine systems after unsuccessful system message delivery if have not communicated before" in { // Synthesize an ActorRef to a remote system this one has never talked to before. // This forces ReliableDeliverySupervisor to start with unknown remote system UID. - val extinctPath = RootActorPath(Address(protocol, "extinct-system", "localhost", SocketUtil.temporaryLocalPort())) / "user" / "noone" + val extinctPath = RootActorPath(Address(protocol, "extinct-system", "localhost", + SocketUtil.temporaryLocalPort())) / "user" / "noone" val transport = RARP(system).provider.transport val extinctRef = new RemoteActorRef( transport, diff --git a/akka-remote/src/test/scala/akka/remote/classic/RemoteInitErrorSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/RemoteInitErrorSpec.scala index 066c8f227c..9b34911de6 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/RemoteInitErrorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/RemoteInitErrorSpec.scala @@ -41,7 +41,7 @@ class RemoteInitErrorSpec extends AnyWordSpec with Matchers { def currentThreadIds(): Set[Long] = { val threads = Thread.getAllStackTraces().keySet() - threads.asScala.collect({ case t: Thread if (!t.isDaemon()) => t.getId() }) + threads.asScala.collect { case t: Thread if !t.isDaemon() => t.getId() } } "Remoting" must { diff --git a/akka-remote/src/test/scala/akka/remote/classic/RemotingSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/RemotingSpec.scala index fbee46f004..750486158a 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/RemotingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/RemotingSpec.scala @@ -165,11 +165,11 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D private def verifySend(msg: Any)(afterSend: => Unit): Unit = { val bigBounceId = s"bigBounce-${ThreadLocalRandom.current.nextInt()}" val bigBounceOther = remoteSystem.actorOf(Props(new Actor { - def receive = { - case x: Int => sender() ! byteStringOfSize(x) - case x => sender() ! x - } - }).withDeploy(Deploy.local), bigBounceId) + def receive = { + case x: Int => sender() ! byteStringOfSize(x) + case x => sender() ! x + } + }).withDeploy(Deploy.local), bigBounceId) val bigBounceHere = RARP(system).provider.resolveActorRef(s"akka.test://remote-sys@localhost:12346/user/$bigBounceId") @@ -331,11 +331,11 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "select actors across node boundaries" in { val l = system.actorOf(Props(new Actor { - def receive = { - case (p: Props, n: String) => sender() ! context.actorOf(p, n) - case ActorSelReq(s) => sender() ! 
context.actorSelection(s) - } - }), "looker2") + def receive = { + case (p: Props, n: String) => sender() ! context.actorOf(p, n) + case ActorSelReq(s) => sender() ! context.actorSelection(s) + } + }), "looker2") // child is configured to be deployed on remoteSystem l ! ((Props[Echo1](), "child")) val child = expectMsgType[ActorRef] @@ -432,7 +432,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "be able to use multiple transports and use the appropriate one (TCP)" in { val r = system.actorOf(Props[Echo1](), "gonk") r.path.toString should ===( - s"akka.tcp://remote-sys@localhost:${port(remoteSystem, "tcp")}/remote/akka.tcp/RemotingSpec@localhost:${port(system, "tcp")}/user/gonk") + s"akka.tcp://remote-sys@localhost:${port(remoteSystem, "tcp")}/remote/akka.tcp/RemotingSpec@localhost:${port( + system, "tcp")}/user/gonk") r ! 42 expectMsg(42) EventFilter[Exception]("crash", occurrences = 1).intercept { @@ -448,7 +449,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "be able to use multiple transports and use the appropriate one (SSL)" in { val r = system.actorOf(Props[Echo1](), "roghtaar") r.path.toString should ===( - s"akka.ssl.tcp://remote-sys@localhost:${port(remoteSystem, "ssl.tcp")}/remote/akka.ssl.tcp/RemotingSpec@localhost:${port(system, "ssl.tcp")}/user/roghtaar") + s"akka.ssl.tcp://remote-sys@localhost:${port(remoteSystem, + "ssl.tcp")}/remote/akka.ssl.tcp/RemotingSpec@localhost:${port(system, "ssl.tcp")}/user/roghtaar") r ! 42 expectMsg(10.seconds, 42) EventFilter[Exception]("crash", occurrences = 1).intercept { diff --git a/akka-remote/src/test/scala/akka/remote/classic/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/UntrustedSpec.scala index 4983b2659f..59c29a9b3b 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/UntrustedSpec.scala @@ -133,12 +133,12 @@ akka.actor.serialization-bindings { val logProbe = TestProbe() // but instead install our own listener system.eventStream.subscribe(system.actorOf(Props(new Actor { - import Logging._ - def receive = { - case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d - case _ => - } - }).withDeploy(Deploy.local), "debugSniffer"), classOf[Logging.Debug]) + import Logging._ + def receive = { + case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d + case _ => + } + }).withDeploy(Deploy.local), "debugSniffer"), classOf[Logging.Debug]) remoteDaemon ! 
"hello" logProbe.expectMsgType[Logging.Debug] diff --git a/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolSpec.scala index f1db744eb1..e88e6a61f5 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolSpec.scala @@ -355,7 +355,7 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit wrappedHandle.readHandlerPromise.success(ActorHandleEventListener(testActor)) - //wait for one heartbeat + // wait for one heartbeat awaitCond(lastActivityIsHeartbeat(registry)) failureDetector.isAvailable = false diff --git a/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolStressTest.scala b/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolStressTest.scala index c3b6bb5557..cee844d290 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolStressTest.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/transport/AkkaProtocolStressTest.scala @@ -96,10 +96,10 @@ class AkkaProtocolStressTest extends AkkaSpec(configA) with ImplicitSender with val systemB = ActorSystem("systemB", system.settings.config) val remote = systemB.actorOf(Props(new Actor { - def receive = { - case seq: Int => sender() ! seq - } - }), "echo") + def receive = { + case seq: Int => sender() ! seq + } + }), "echo") val addressB = systemB.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val rootB = RootActorPath(addressB) diff --git a/akka-remote/src/test/scala/akka/remote/classic/transport/SwitchableLoggedBehaviorSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/transport/SwitchableLoggedBehaviorSpec.scala index fd905e4d42..8130c6b695 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/transport/SwitchableLoggedBehaviorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/transport/SwitchableLoggedBehaviorSpec.scala @@ -19,7 +19,7 @@ object SwitchableLoggedBehaviorSpec { class SwitchableLoggedBehaviorSpec extends AkkaSpec with DefaultTimeout { import akka.remote.classic.transport.SwitchableLoggedBehaviorSpec._ - private def defaultBehavior = new SwitchableLoggedBehavior[Unit, Int]((_) => Future.successful(3), (_) => ()) + private def defaultBehavior = new SwitchableLoggedBehavior[Unit, Int](_ => Future.successful(3), _ => ()) "A SwitchableLoggedBehavior" must { @@ -32,10 +32,10 @@ class SwitchableLoggedBehaviorSpec extends AkkaSpec with DefaultTimeout { "be able to push generic behavior" in { val behavior = defaultBehavior - behavior.push((_) => Future.successful(4)) + behavior.push(_ => Future.successful(4)) Await.result(behavior(()), timeout.duration) should ===(4) - behavior.push((_) => Future.failed(TestException)) + behavior.push(_ => Future.failed(TestException)) behavior(()).value match { case Some(Failure(`TestException`)) => case _ => fail("Expected exception") diff --git a/akka-remote/src/test/scala/akka/remote/classic/transport/ThrottlerTransportAdapterSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/transport/ThrottlerTransportAdapterSpec.scala index e896504cc1..447706be82 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/transport/ThrottlerTransportAdapterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/transport/ThrottlerTransportAdapterSpec.scala @@ -129,13 +129,13 @@ class ThrottlerTransportAdapterSpec extends AkkaSpec(configA) with 
ImplicitSende // of the connection, repeat until success here ! Lost("Blackhole 3") awaitCond({ - if (receiveOne(Duration.Zero) == Lost("Blackhole 3")) - true - else { - here ! Lost("Blackhole 3") - false - } - }, 15.seconds) + if (receiveOne(Duration.Zero) == Lost("Blackhole 3")) + true + else { + here ! Lost("Blackhole 3") + false + } + }, 15.seconds) here ! "Cleanup" fishForMessage(5.seconds) { diff --git a/akka-remote/src/test/scala/akka/remote/classic/transport/netty/NettyTransportSpec.scala b/akka-remote/src/test/scala/akka/remote/classic/transport/netty/NettyTransportSpec.scala index 0782ab3dc5..4d6080f164 100644 --- a/akka-remote/src/test/scala/akka/remote/classic/transport/netty/NettyTransportSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/classic/transport/netty/NettyTransportSpec.scala @@ -61,7 +61,7 @@ class NettyTransportSpec extends AnyWordSpec with Matchers with BindBehavior { } "bind to a random port but remoting accepts from a specified port" in { - //keep open to ensure it isn't used for the bind-port + // keep open to ensure it isn't used for the bind-port val (openSS, address) = randomOpenServerSocket() try { diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala index 1d2dd79058..4b129f8c50 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala @@ -65,8 +65,8 @@ import akka.util.OptionVal val name = clazz.getSimpleName // looking for "AbstractBeanFactoryPointcutAdvisor" but no point to allow any is there? if ("AbstractPointcutAdvisor".equals(name) - // ditto for "FileSystemXmlApplicationContext": block all ApplicationContexts - || "AbstractApplicationContext".equals(name)) + // ditto for "FileSystemXmlApplicationContext": block all ApplicationContexts + || "AbstractApplicationContext".equals(name)) false else isAllowedSpringClass(clazz.getSuperclass) @@ -101,7 +101,7 @@ import akka.util.OptionVal } object LZ4Meta { - val LZ4_MAGIC = 0x87d96df6 // The last 4 bytes of `printf akka | sha512sum` + val LZ4_MAGIC = 0x87D96DF6 // The last 4 bytes of `printf akka | sha512sum` def apply(bytes: Array[Byte]): LZ4Meta = { LZ4Meta(8, bytes.length) diff --git a/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala b/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala index d4fe71abe2..3cdf6b865a 100644 --- a/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala +++ b/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala @@ -119,7 +119,7 @@ object ScalaTestMessages { extends TestMessage // #jackson-scala-enumeration - //delegate to AkkaSerialization + // delegate to AkkaSerialization object HasAkkaSerializer { def apply(description: String): HasAkkaSerializer = new HasAkkaSerializer(description) } @@ -460,16 +460,17 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "be possible to create custom ObjectMapper" in { val customJavaTimeModule = new SimpleModule() { import com.fasterxml.jackson.databind.ser.std._ - addSerializer(classOf[Instant], new StdSerializer[Instant](classOf[Instant]) { - override def serialize(value: Instant, gen: JsonGenerator, provider: SerializerProvider): Unit 
= { - gen.writeStartObject() - gen.writeFieldName("nanos") - gen.writeNumber(value.getNano) - gen.writeFieldName("custom") - gen.writeString("field") - gen.writeEndObject() - } - }) + addSerializer(classOf[Instant], + new StdSerializer[Instant](classOf[Instant]) { + override def serialize(value: Instant, gen: JsonGenerator, provider: SerializerProvider): Unit = { + gen.writeStartObject() + gen.writeFieldName("nanos") + gen.writeNumber(value.getNano) + gen.writeFieldName("custom") + gen.writeString("field") + gen.writeEndObject() + } + }) } val customJacksonObjectMapperFactory = new JacksonObjectMapperFactory { diff --git a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala index 69e927d564..1b43d90dd8 100644 --- a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala +++ b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala @@ -6,7 +6,7 @@ package doc.akka.serialization.jackson object CustomAdtSerializer { - //#adt-trait-object + // #adt-trait-object import com.fasterxml.jackson.core.JsonGenerator import com.fasterxml.jackson.core.JsonParser import com.fasterxml.jackson.databind.DeserializationContext @@ -55,5 +55,5 @@ object CustomAdtSerializer { } final case class Compass(currentDirection: Direction) extends MySerializable - //#adt-trait-object + // #adt-trait-object } diff --git a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala index 0cd6e7de90..21b08b8473 100644 --- a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala +++ b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala @@ -131,7 +131,7 @@ object SerializationDocSpec { object Polymorphism { - //#polymorphism + // #polymorphism final case class Zoo(primaryAttraction: Animal) extends MySerializable @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") @@ -144,12 +144,12 @@ object SerializationDocSpec { final case class Lion(name: String) extends Animal final case class Elephant(name: String, age: Int) extends Animal - //#polymorphism + // #polymorphism } object PolymorphismMixedClassObject { - //#polymorphism-case-object + // #polymorphism-case-object final case class Zoo(primaryAttraction: Animal) extends MySerializable @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") @@ -172,7 +172,7 @@ object SerializationDocSpec { // whenever we need to deserialize an instance of Unicorn trait, we return the object Unicorn override def deserialize(p: JsonParser, ctxt: DeserializationContext): Unicorn = Unicorn } - //#polymorphism-case-object + // #polymorphism-case-object } val configDateTime = """ diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala index 70c96fbd2a..c04441b423 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala @@ -4,7 +4,7 @@ package akka.event.slf4j -import org.slf4j.{ MDC, Marker, MarkerFactory, Logger => SLFLogger, LoggerFactory => SLFLoggerFactory } +import org.slf4j.{ Logger => SLFLogger, LoggerFactory => SLFLoggerFactory, MDC, Marker, MarkerFactory 
} import akka.actor._ import akka.dispatch.RequiresMessageQueue diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala index d41af6bdd8..65c61d5247 100644 --- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala +++ b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala @@ -42,7 +42,7 @@ object Slf4jLoggingFilterSpec { sender() ! LoggerInitialized case SetTarget(ref) => target = Some(ref) - ref ! ("OK") + ref ! "OK" case event: LogEvent => println("# event: " + event) target.foreach { _ ! event } diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala index 90f8eb3718..82504d76b6 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala @@ -97,7 +97,7 @@ object TestPublisher { @ccompatUsedUntil213 private val probe: TestProbe = TestProbe() - //this is a way to pause receiving message from probe until subscription is done + // this is a way to pause receiving message from probe until subscription is done private val subscribed = new CountDownLatch(1) probe.ignoreMsg { case SubscriptionDone => true } probe.setAutoPilot(new TestActor.AutoPilot() { @@ -690,7 +690,6 @@ object TestSubscriber { /** * Expect a stream element and test it with partial function. - * */ def expectNextPF[T](f: PartialFunction[Any, T]): T = expectNextWithTimeoutPF(Duration.Undefined, f) @@ -701,9 +700,10 @@ object TestSubscriber { * @param max wait no more than max time, otherwise throw AssertionError */ def expectNextWithTimeoutPF[T](max: Duration, f: PartialFunction[Any, T]): T = - expectEventWithTimeoutPF(max, { - case OnNext(n) if f.isDefinedAt(n) => f(n) - }) + expectEventWithTimeoutPF(max, + { + case OnNext(n) if f.isDefinedAt(n) => f(n) + }) /** * Expect a stream element during specified time or timeout and test it with partial function. 
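For context on the hunk above: expectNextWithTimeoutPF takes the timeout as its first parameter and a partial function as its second, and the assertion fails unless an element matching the partial function arrives within that timeout. A minimal usage sketch (illustration only, not part of the patch; assumes an implicit ActorSystem is in scope and the standard akka-stream-testkit imports):

import scala.concurrent.duration._
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink

// request one element and require that it matches the partial function within 100ms
Source(1 to 3)
  .runWith(TestSink.probe[Int])
  .request(1)
  .expectNextWithTimeoutPF(100.millis, {
    case n: Int if n > 0 => n
  })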
diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala index c706634a73..9cedc115e2 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala @@ -49,10 +49,11 @@ object StreamTestKit { val timeout = c.getDuration("all-stages-stopped-timeout", MILLISECONDS).millis probe.within(timeout) { try probe.awaitAssert { - supervisor.tell(StreamSupervisor.GetChildren, probe.ref) - val children = probe.expectMsgType[StreamSupervisor.Children].children - assert(children.isEmpty, s"expected no StreamSupervisor children, but got [${children.mkString(", ")}]") - } catch { + supervisor.tell(StreamSupervisor.GetChildren, probe.ref) + val children = probe.expectMsgType[StreamSupervisor.Children].children + assert(children.isEmpty, s"expected no StreamSupervisor children, but got [${children.mkString(", ")}]") + } + catch { case ex: Throwable => import sys.dispatcher printDebugDump(supervisor) diff --git a/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala b/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala index 34a65582a8..52bc0784ee 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala @@ -237,7 +237,7 @@ object GraphInterpreterSpecKit { } private def setPortIds(stage: GraphStageWithMaterializedValue[_ <: Shape, _]): Unit = { - stage.shape.inlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } + stage.shape.inlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } stage.shape.outlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } } @@ -268,25 +268,27 @@ trait GraphInterpreterSpecKit extends StreamSpec { out.id = 0 override def toString = "Upstream" - setHandler(out, new OutHandler { - override def onPull() = { - // TODO handler needed but should it do anything? - } + setHandler(out, + new OutHandler { + override def onPull() = { + // TODO handler needed but should it do anything? + } - override def toString = "Upstream.OutHandler" - }) + override def toString = "Upstream.OutHandler" + }) } object Downstream extends DownstreamBoundaryStageLogic[Int] { override val in = Inlet[Int]("down") in.id = 0 - setHandler(in, new InHandler { - override def onPush() = { - // TODO handler needed but should it do anything? - } + setHandler(in, + new InHandler { + override def onPush() = { + // TODO handler needed but should it do anything? 
+ } - override def toString = "Downstream.InHandler" - }) + override def toString = "Downstream.InHandler" + }) override def toString = "Downstream" } @@ -372,11 +374,12 @@ trait GraphInterpreterSpecKit extends StreamSpec { val out = Outlet[T]("out") out.id = 0 - setHandler(out, new OutHandler { - override def onPull(): Unit = lastEvent += RequestOne(UpstreamProbe.this) - override def onDownstreamFinish(cause: Throwable): Unit = lastEvent += Cancel(UpstreamProbe.this, cause) - override def toString = s"${UpstreamProbe.this.toString}.outHandler" - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = lastEvent += RequestOne(UpstreamProbe.this) + override def onDownstreamFinish(cause: Throwable): Unit = lastEvent += Cancel(UpstreamProbe.this, cause) + override def toString = s"${UpstreamProbe.this.toString}.outHandler" + }) def onNext(elem: T, eventLimit: Int = Int.MaxValue): Unit = { if (GraphInterpreter.Debug) println(s"----- NEXT: $this $elem") @@ -401,12 +404,13 @@ trait GraphInterpreterSpecKit extends StreamSpec { val in = Inlet[T]("in") in.id = 0 - setHandler(in, new InHandler { - override def onPush(): Unit = lastEvent += OnNext(DownstreamProbe.this, grab(in)) - override def onUpstreamFinish(): Unit = lastEvent += OnComplete(DownstreamProbe.this) - override def onUpstreamFailure(ex: Throwable): Unit = lastEvent += OnError(DownstreamProbe.this, ex) - override def toString = s"${DownstreamProbe.this.toString}.inHandler" - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = lastEvent += OnNext(DownstreamProbe.this, grab(in)) + override def onUpstreamFinish(): Unit = lastEvent += OnComplete(DownstreamProbe.this) + override def onUpstreamFailure(ex: Throwable): Unit = lastEvent += OnError(DownstreamProbe.this, ex) + override def toString = s"${DownstreamProbe.this.toString}.inHandler" + }) def requestOne(eventLimit: Int = Int.MaxValue): Unit = { if (GraphInterpreter.Debug) println(s"----- REQ $this") @@ -468,21 +472,22 @@ trait GraphInterpreterSpecKit extends StreamSpec { def cancel(): Unit = cancel(this.in) def grab(): T = grab(this.in) - setHandler(this.in, new InHandler { + setHandler(this.in, + new InHandler { - // Modified onPush that does not grab() automatically the element. This accesses some internals. - override def onPush(): Unit = { - val internalEvent = portToConn(DownstreamPortProbe.this.in.id).slot + // Modified onPush that does not grab() automatically the element. This accesses some internals. 
+ override def onPush(): Unit = { + val internalEvent = portToConn(DownstreamPortProbe.this.in.id).slot - internalEvent match { - case Failed(_, elem) => lastEvent += OnNext(DownstreamPortProbe.this, elem) - case elem => lastEvent += OnNext(DownstreamPortProbe.this, elem) + internalEvent match { + case Failed(_, elem) => lastEvent += OnNext(DownstreamPortProbe.this, elem) + case elem => lastEvent += OnNext(DownstreamPortProbe.this, elem) + } } - } - override def onUpstreamFinish() = lastEvent += OnComplete(DownstreamPortProbe.this) - override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(DownstreamPortProbe.this, ex) - }) + override def onUpstreamFinish() = lastEvent += OnComplete(DownstreamPortProbe.this) + override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(DownstreamPortProbe.this, ex) + }) } val (logics, connections) = @@ -531,18 +536,20 @@ trait GraphInterpreterSpecKit extends StreamSpec { } } - setHandler(stagein, new InHandler { - override def onPush(): Unit = mayFail(push(stageout, grab(stagein))) - override def onUpstreamFinish(): Unit = mayFail(completeStage()) - override def onUpstreamFailure(ex: Throwable): Unit = mayFail(failStage(ex)) - override def toString = "insideOutStage.stagein" - }) + setHandler(stagein, + new InHandler { + override def onPush(): Unit = mayFail(push(stageout, grab(stagein))) + override def onUpstreamFinish(): Unit = mayFail(completeStage()) + override def onUpstreamFailure(ex: Throwable): Unit = mayFail(failStage(ex)) + override def toString = "insideOutStage.stagein" + }) - setHandler(stageout, new OutHandler { - override def onPull(): Unit = mayFail(pull(stagein)) - override def onDownstreamFinish(cause: Throwable): Unit = mayFail(completeStage()) - override def toString = "insideOutStage.stageout" - }) + setHandler(stageout, + new OutHandler { + override def onPull(): Unit = mayFail(pull(stagein)) + override def onDownstreamFinish(cause: Throwable): Unit = mayFail(completeStage()) + override def toString = "insideOutStage.stageout" + }) override def preStart(): Unit = mayFail(lastEvent += PreStart(insideOutStage)) override def postStop(): Unit = @@ -647,16 +654,17 @@ trait GraphInterpreterSpecKit extends StreamSpec { val in = Inlet[TT]("in") in.id = 0 - setHandler(in, new InHandler { + setHandler(in, + new InHandler { - // Modified onPush that does not grab() automatically the element. This accesses some internals. - override def onPush(): Unit = { - lastEvent += OnNext(grab(in)) - } + // Modified onPush that does not grab() automatically the element. This accesses some internals. 
+ override def onPush(): Unit = { + lastEvent += OnNext(grab(in)) + } - override def onUpstreamFinish() = lastEvent += OnComplete - override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(ex) - }) + override def onUpstreamFinish() = lastEvent += OnComplete + override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(ex) + }) def requestOne(): Unit = { pull(in) diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala index 1eaa92e4ca..2dcfdc26ac 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala @@ -120,8 +120,8 @@ trait ScriptedTest extends Matchers { def debug: String = s"Script(pending=($pendingIns in, $pendingOuts out), remainingIns=${providedInputs - .drop(inputCursor) - .mkString("/")}, remainingOuts=${expectedOutputs.drop(outputCursor).mkString("/")})" + .drop(inputCursor) + .mkString("/")}, remainingOuts=${expectedOutputs.drop(outputCursor).mkString("/")})" } class ScriptRunner[In, Out, M]( diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala index b85e84cf20..77548c7e31 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala @@ -130,10 +130,11 @@ class StreamTestKitSpec extends AkkaSpec { .tick(initialDelay, 1.millis, 1) .runWith(TestSink.probe) .request(1) - .expectNextWithTimeoutPF(timeout, { - case 1 => - system.log.info("Message received :(") - }) + .expectNextWithTimeoutPF(timeout, + { + case 1 => + system.log.info("Message received :(") + }) }.getMessage should include("timeout") } @@ -173,10 +174,11 @@ class StreamTestKitSpec extends AkkaSpec { .tick(initialDelay, 1.millis, 1) .runWith(TestSink.probe) .request(1) - .expectNextChainingPF(timeout, { - case 1 => - system.log.info("Message received :(") - }) + .expectNextChainingPF(timeout, + { + case 1 => + system.log.info("Message received :(") + }) }.getMessage should include("timeout") } diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala index 5a27fb5e37..fa892f7eb4 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala @@ -31,7 +31,7 @@ class TestPublisherSubscriberSpec extends AkkaSpec(""" upstreamSubscription.sendNext(1) downstreamSubscription.request(1) upstream.expectEventPF { case RequestMore(_, e) => e } should ===(1L) - downstream.expectEventPF { case OnNext(e) => e } should ===(1) + downstream.expectEventPF { case OnNext(e) => e } should ===(1) upstreamSubscription.sendNext(1) downstreamSubscription.request(1) @@ -53,7 +53,7 @@ class TestPublisherSubscriberSpec extends AkkaSpec(""" upstreamSubscription.sendNext(1) downstreamSubscription.request(1) - an[AssertionError] should be thrownBy upstream.expectEventPF { case Subscribe(e) => e } + an[AssertionError] should be thrownBy upstream.expectEventPF { case Subscribe(e) => e } an[AssertionError] should be thrownBy downstream.expectNextPF[String] { case e: String => e } upstreamSubscription.sendComplete() diff 
--git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala index f10e45427d..37a7754b98 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala @@ -28,7 +28,7 @@ abstract class TwoStreamsSetup extends BaseTwoStreamsSetup { Source.fromPublisher(p1) ~> f.left Source.fromPublisher(p2) ~> f.right - f.out ~> Sink.fromSubscriber(subscriber) + f.out ~> Sink.fromSubscriber(subscriber) ClosedShape }) .run() diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala index 6ae9628e9a..5ca021e3ff 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala @@ -38,6 +38,7 @@ abstract class AkkaPublisherVerification[T](val env: TestEnvironment, publisherS def iterable(elements: Long): immutable.Iterable[Int] = if (elements > Int.MaxValue) - new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } else + new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } + else 0 until elements.toInt } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala index de6f5f5006..7ef08a4bf6 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala @@ -15,7 +15,8 @@ class FanoutPublisherTest extends AkkaPublisherVerification[Int] { def createPublisher(elements: Long): Publisher[Int] = { val iterable: immutable.Iterable[Int] = - if (elements == 0) new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } else + if (elements == 0) new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } + else 0 until elements.toInt Source(iterable).runWith(Sink.asPublisher(true)) diff --git a/akka-stream-tests/src/test/scala-jdk9-only/akka/stream/scaladsl/FlowPublisherSinkSpec.scala b/akka-stream-tests/src/test/scala-jdk9-only/akka/stream/scaladsl/FlowPublisherSinkSpec.scala index e8776c6b73..7332ca5f64 100644 --- a/akka-stream-tests/src/test/scala-jdk9-only/akka/stream/scaladsl/FlowPublisherSinkSpec.scala +++ b/akka-stream-tests/src/test/scala-jdk9-only/akka/stream/scaladsl/FlowPublisherSinkSpec.scala @@ -17,9 +17,11 @@ class FlowPublisherSinkSpec extends StreamSpec { "A FlowPublisherSink" must { "work with SubscriberSource" in { - val (sub, pub) = JavaFlowSupport.Source.asSubscriber[Int].toMat(JavaFlowSupport.Sink.asPublisher(false))(Keep.both).run() + val (sub, pub) = + JavaFlowSupport.Source.asSubscriber[Int].toMat(JavaFlowSupport.Sink.asPublisher(false))(Keep.both).run() Source(1 to 100).to(JavaFlowSupport.Sink.fromSubscriber(sub)).run() - Await.result(JavaFlowSupport.Source.fromPublisher(pub).limit(1000).runWith(Sink.seq), 3.seconds) should ===(1 to 100) + Await.result(JavaFlowSupport.Source.fromPublisher(pub).limit(1000).runWith(Sink.seq), 3.seconds) should ===( + 1 to 100) } "be able to use Publisher in materialized value transformation" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala index 70688c4f01..3c67bde829 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala @@ -47,7 +47,7 @@ class FusingSpec extends StreamSpec { .futureValue .sorted should ===(0 to 9) val refs = receiveN(20) - refs.toSet should have size (11) // main flow + 10 subflows + refs.toSet should have size 11 // main flow + 10 subflows } "use multiple actors when there are asynchronous boundaries in the subflows (operator)" in { @@ -59,7 +59,7 @@ class FusingSpec extends StreamSpec { .futureValue .sorted should ===(0 to 9) val refs = receiveN(20) - refs.toSet should have size (11) // main flow + 10 subflows + refs.toSet should have size 11 // main flow + 10 subflows } "use one actor per grouped substream when there is an async boundary around the flow (manual)" in { @@ -92,7 +92,7 @@ class FusingSpec extends StreamSpec { refs.toSet should have size (in.size + 1) // outer/main actor + 1 actor per subflow } - //an UnfoldResourceSource equivalent without an async boundary + // an UnfoldResourceSource equivalent without an async boundary case class UnfoldResourceNoAsyncBoundry[T, S](create: () => S, readData: (S) => Option[T], close: (S) => Unit) extends GraphStage[SourceShape[T]] { val stage_ = new UnfoldResourceSource(create, readData, close) @@ -107,7 +107,7 @@ class FusingSpec extends StreamSpec { val slowInitSrc = UnfoldResourceNoAsyncBoundry( () => { Await.result(promise.future, 1.minute); () }, (_: Unit) => Some(1), - (_: Unit) => ()).asSource.watchTermination()(Keep.right).async //commenting this out, makes the test pass + (_: Unit) => ()).asSource.watchTermination()(Keep.right).async // commenting this out makes the test pass val downstream = Flow[Int] .prepend(Source.single(1)) .flatMapPrefix(0) { @@ -122,13 +122,13 @@ class FusingSpec extends StreamSpec { val (f1, f2) = g.run() f2.failed.futureValue shouldEqual TE("I hate mondays") f1.value should be(empty) - //by now downstream managed to fail, hence it already processed the message from Flow.single, - //hence we know for sure that all graph stage locics in the downstream interpreter were initialized(=preStart) - //hence upstream subscription was initiated. - //since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request - //since a blocked actor can not process additional messages from its inbox. - //so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. - //prior to akka#29194, this scenario resulted with cancellation signal rather than the expected error signal. + // by now downstream managed to fail, hence it already processed the message from Flow.single, + // hence we know for sure that all graph stage logics in the downstream interpreter were initialized (= preStart), + // hence upstream subscription was initiated. + // since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request, + // since a blocked actor cannot process additional messages from its inbox. + // so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. + // prior to akka#29194, this scenario resulted in a cancellation signal rather than the expected error signal.
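+ // completing the promise unblocks the Await in the resource's create function, letting upstream finish preStart and propagate the failure to f1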
promise.success(Done) f1.failed.futureValue shouldEqual TE("I hate mondays") } @@ -138,7 +138,7 @@ class FusingSpec extends StreamSpec { val slowInitSrc = UnfoldResourceNoAsyncBoundry( () => { Await.result(promise.future, 1.minute); () }, (_: Unit) => Some(1), - (_: Unit) => ()).asSource.watchTermination()(Keep.right).async //commenting this out, makes the test pass + (_: Unit) => ()).asSource.watchTermination()(Keep.right).async // commenting this out makes the test pass val failingSrc = Source.failed(TE("I hate mondays")).watchTermination()(Keep.right) @@ -147,13 +147,13 @@ class FusingSpec extends StreamSpec { val (f1, f2) = g.run() f2.failed.futureValue shouldEqual TE("I hate mondays") f1.value should be(empty) - //by now downstream managed to fail, hence it already processed the message from Flow.single, - //hence we know for sure that all graph stage locics in the downstream interpreter were initialized(=preStart) - //hence upstream subscription was initiated. - //since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request - //since a blocked actor can not process additional messages from its inbox. - //so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. - //prior to akka#29194, this scenario resulted with cancellation signal rather than the expected error signal. + // by now downstream managed to fail, hence it already processed the message from Flow.single, + // hence we know for sure that all graph stage logics in the downstream interpreter were initialized (= preStart), + // hence upstream subscription was initiated. + // since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request, + // since a blocked actor cannot process additional messages from its inbox. + // so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. + // prior to akka#29194, this scenario resulted in a cancellation signal rather than the expected error signal.
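+ // as above: completing the promise unblocks the resource's create(), so upstream can finish preStart and f1 observes the failure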
promise.success(Done) f1.failed.futureValue shouldEqual TE("I hate mondays") } diff --git a/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala index 5fa4f67ed1..2c39cc70c2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala @@ -29,8 +29,8 @@ class StreamAttributeDocSpec extends StreamSpec("my-stream-dispatcher = \"akka.t .map(_.toString) .toMat(Sink.foreach(println))(Keep.right) .withAttributes(Attributes.inputBuffer(4, 4) and - ActorAttributes.dispatcher("my-stream-dispatcher") and - TcpAttributes.tcpWriteBufferSize(2048)) + ActorAttributes.dispatcher("my-stream-dispatcher") and + TcpAttributes.tcpWriteBufferSize(2048)) stream.run() // #attributes-on-stream diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala index 6e315bb9ae..46ad87fa62 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala @@ -61,17 +61,19 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S val out = Outlet[Int]("out") override val shape = FlowShape(in, out) override def createLogic(attr: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onUpstreamFinish(): Unit = { - emit(out, 5, () => emit(out, 6)) - emit(out, 7, () => emit(out, 8)) - completeStage() - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onUpstreamFinish(): Unit = { + emit(out, 5, () => emit(out, 6)) + emit(out, 7, () => emit(out, 8)) + completeStage() + } + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) } } @@ -80,15 +82,17 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S val out = Outlet[Int]("out") override val shape = FlowShape(in, out) override def createLogic(attr: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onUpstreamFinish(): Unit = complete(out) - override def toString = "InHandler" - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - override def toString = "OutHandler" - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onUpstreamFinish(): Unit = complete(out) + override def toString = "InHandler" + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + override def toString = "OutHandler" + }) override def toString = "GraphStageLogicSpec.passthroughLogic" } override def toString = "GraphStageLogicSpec.passthrough" @@ -99,9 +103,10 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override val shape = SourceShape(out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(out, new OutHandler { - override def onPull(): Unit = emitMultiple(out, Iterator.empty, () => emit(out, 42, () => completeStage())) - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit 
= emitMultiple(out, Iterator.empty, () => emit(out, 42, () => completeStage())) + }) } override def toString = "GraphStageLogicSpec.emitEmptyIterable" } @@ -114,7 +119,7 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S setHandler(shape.in, EagerTerminateInput) setHandler(shape.out, EagerTerminateOutput) override def preStart(): Unit = - readN(shape.in, n)(e => emitMultiple(shape.out, e.iterator, () => completeStage()), (_) => ()) + readN(shape.in, n)(e => emitMultiple(shape.out, e.iterator, () => completeStage()), _ => ()) } } @@ -168,7 +173,7 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S .runWith(TestSink.probe) .request(5) .expectNext(1) - //emitting with callback gives nondeterminism whether 2 or 3 will be pushed first + // emitting with callback gives nondeterminism whether 2 or 3 will be pushed first .expectNextUnordered(2, 3) .expectNext(4) .expectComplete() @@ -188,12 +193,13 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override val shape = FlowShape(in, out) override def createLogic(attr: Attributes) = new GraphStageLogic(shape) { setHandler(in, eagerTerminateInput) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - completeStage() - testActor ! "pulled" - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + completeStage() + testActor ! "pulled" + } + }) override def preStart(): Unit = testActor ! "preStart" override def postStop(): Unit = testActor ! "postStop" } @@ -295,9 +301,10 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler(in, + new InHandler { + override def onPush() = ??? + }) // ups we forgot the out handler } @@ -321,9 +328,10 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler(in, + new InHandler { + override def onPush() = ??? + }) // ups we forgot the out handler } @@ -346,9 +354,10 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler(in, + new InHandler { + override def onPush() = ??? + }) // ups we forgot the out handler } @@ -370,9 +379,10 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler(in, + new InHandler { + override def onPush() = ??? 
+ }) // ups we forgot the out handler } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala index 7b691f2bb7..b4a5fa9e12 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala @@ -79,12 +79,13 @@ class SubInletOutletSpec extends StreamSpec { } }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (!subIn.hasBeenPulled) - subIn.pull() - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + if (!subIn.hasBeenPulled) + subIn.pull() + } + }) } } @@ -164,18 +165,19 @@ class SubInletOutletSpec extends StreamSpec { }) } - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - elem match { - case "completeStage" => completeStage() - case "cancelStage" => cancelStage(NoMoreElementsNeeded) - case "failStage" => failStage(TE("boom")) - case "completeAll" => cancel(in) - case other => subOut.push(other) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + elem match { + case "completeStage" => completeStage() + case "cancelStage" => cancelStage(NoMoreElementsNeeded) + case "failStage" => failStage(TE("boom")) + case "completeAll" => cancel(in) + case other => subOut.push(other) + } } - } - }) + }) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala index e703b67787..9952cb0b93 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala @@ -126,7 +126,8 @@ class TimeoutsSpec extends StreamSpec { "BackpressureTimeout" must { "pass through elements unmodified" in { - Await.result(Source(1 to 100).backpressureTimeout(1.second).grouped(200).runWith(Sink.head), 3.seconds) should ===( + Await.result(Source(1 to 100).backpressureTimeout(1.second).grouped(200).runWith(Sink.head), + 3.seconds) should ===( 1 to 100) } @@ -271,7 +272,7 @@ class TimeoutsSpec extends StreamSpec { import GraphDSL.Implicits._ val timeoutStage = b.add(BidiFlow.bidirectionalIdleTimeout[String, Int](2.seconds)) Source.fromPublisher(upWrite) ~> timeoutStage.in1 - timeoutStage.out1 ~> Sink.fromSubscriber(downRead) + timeoutStage.out1 ~> Sink.fromSubscriber(downRead) Sink.fromSubscriber(upRead) <~ timeoutStage.out2 timeoutStage.in2 <~ Source.fromPublisher(downWrite) ClosedShape @@ -321,7 +322,7 @@ class TimeoutsSpec extends StreamSpec { import GraphDSL.Implicits._ val timeoutStage = b.add(BidiFlow.bidirectionalIdleTimeout[String, Int](2.seconds)) Source.fromPublisher(upWrite) ~> timeoutStage.in1 - timeoutStage.out1 ~> Sink.fromSubscriber(downRead) + timeoutStage.out1 ~> Sink.fromSubscriber(downRead) Sink.fromSubscriber(upRead) <~ timeoutStage.out2 timeoutStage.in2 <~ Source.fromPublisher(downWrite) ClosedShape diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala index 0a43d8b9f8..10f727ea72 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala @@ -57,25 +57,29 @@ class ActorGraphInterpreterSpec extends StreamSpec { val 
shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in1, new InHandler { - override def onPush(): Unit = push(out1, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + setHandler(in1, + new InHandler { + override def onPush(): Unit = push(out1, grab(in1)) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(in2, new InHandler { - override def onPush(): Unit = push(out2, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + setHandler(in2, + new InHandler { + override def onPush(): Unit = push(out2, grab(in2)) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + setHandler(out1, + new OutHandler { + override def onPull(): Unit = pull(in1) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) - }) + setHandler(out2, + new OutHandler { + override def onPull(): Unit = pull(in2) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) } override def toString = "IdentityBidi" @@ -100,29 +104,33 @@ class ActorGraphInterpreterSpec extends StreamSpec { val shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in1, new InHandler { - override def onPush(): Unit = push(out1, grab(in1)) + setHandler(in1, + new InHandler { + override def onPush(): Unit = push(out1, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(in2, new InHandler { - override def onPush(): Unit = push(out2, grab(in2)) + setHandler(in2, + new InHandler { + override def onPush(): Unit = push(out2, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in1) + setHandler(out1, + new OutHandler { + override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in2) + setHandler(out2, + new OutHandler { + override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) } override def toString = "IdentityBidi" @@ -149,29 +157,33 @@ class ActorGraphInterpreterSpec extends StreamSpec { val shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in1, new InHandler { - override def onPush(): Unit = push(out1, grab(in1)) + setHandler(in1, + new InHandler { + override def onPush(): Unit = push(out1, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(in2, new InHandler { - override def onPush(): 
Unit = push(out2, grab(in2)) + setHandler(in2, + new InHandler { + override def onPush(): Unit = push(out2, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in1) + setHandler(out1, + new OutHandler { + override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in2) + setHandler(out2, + new OutHandler { + override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) } override def toString = "IdentityBidi" @@ -201,29 +213,33 @@ class ActorGraphInterpreterSpec extends StreamSpec { val shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in1, new InHandler { - override def onPush(): Unit = push(out2, grab(in1)) + setHandler(in1, + new InHandler { + override def onPush(): Unit = push(out2, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(in2, new InHandler { - override def onPush(): Unit = push(out1, grab(in2)) + setHandler(in2, + new InHandler { + override def onPush(): Unit = push(out1, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in2) + setHandler(out1, + new OutHandler { + override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in1) + setHandler(out2, + new OutHandler { + override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) } override def toString = "IdentityBidi" @@ -256,13 +272,14 @@ class ActorGraphInterpreterSpec extends StreamSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(shape.out, new OutHandler { - override def onPull(): Unit = { - completeStage() - // This cannot be propagated now since the stage is already closed - push(shape.out, -1) - } - }) + setHandler(shape.out, + new OutHandler { + override def onPull(): Unit = { + completeStage() + // This cannot be propagated now since the stage is already closed + push(shape.out, -1) + } + }) } } @@ -304,8 +321,8 @@ class ActorGraphInterpreterSpec extends StreamSpec { fail(shape.out0, te) } - setHandler(shape.out0, ignoreTerminateOutput) //We fail in preStart anyway - setHandler(shape.out1, ignoreTerminateOutput) //We fail in preStart anyway + setHandler(shape.out0, ignoreTerminateOutput) // We fail in preStart anyway + setHandler(shape.out1, ignoreTerminateOutput) // We fail in preStart anyway passAlong(shape.in, shape.out1) } } @@ -321,8 +338,8 @@ class ActorGraphInterpreterSpec extends StreamSpec { val 
faily = b.add(failyStage) Source.fromPublisher(upstream) ~> faily.in - faily.out0 ~> Sink.fromSubscriber(downstream0) - faily.out1 ~> Sink.fromSubscriber(downstream1) + faily.out0 ~> Sink.fromSubscriber(downstream0) + faily.out1 ~> Sink.fromSubscriber(downstream1) ClosedShape }) @@ -408,12 +425,14 @@ class ActorGraphInterpreterSpec extends StreamSpec { object PostStopSnitchFlow extends SimpleLinearGraphStage[String] { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = push(out, grab(in)) - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = push(out, grab(in)) + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def postStop(): Unit = { gotStop.countDown() diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala index 4ee2227dc1..ce55ef869e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala @@ -56,17 +56,18 @@ class AsyncCallbackSpec extends AkkaSpec(""" probe ! Stopped } - setHandlers(in, out, new InHandler with OutHandler { - def onPush(): Unit = { - val n = grab(in) - probe ! Elem(n) - push(out, n) - } + setHandlers(in, out, + new InHandler with OutHandler { + def onPush(): Unit = { + val n = grab(in) + probe ! Elem(n) + push(out, n) + } - def onPull(): Unit = { - pull(in) - } - }) + def onPull(): Unit = { + pull(in) + } + }) } (logic, logic.callback) @@ -200,8 +201,8 @@ class AsyncCallbackSpec extends AkkaSpec(""" } probe.expectMsg(Started) - Future.sequence(feedbacks).futureValue should have size (100) - (1 to 100).map(_ => probe.expectMsgType[String]).toSet should have size (100) + Future.sequence(feedbacks).futureValue should have size 100 + (1 to 100).map(_ => probe.expectMsgType[String]).toSet should have size 100 in.sendComplete() probe.expectMsg(Stopped) @@ -247,9 +248,10 @@ class AsyncCallbackSpec extends AkkaSpec(""" def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { val logic: GraphStageLogic { val callbacks: Set[AsyncCallback[AnyRef]] } = new GraphStageLogic(shape) { val callbacks = (0 to 10).map(_ => getAsyncCallback[AnyRef](probe ! 
_)).toSet - setHandler(out, new OutHandler { - def onPull(): Unit = () - }) + setHandler(out, + new OutHandler { + def onPull(): Unit = () + }) } (logic, logic.callbacks) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala index ff89b8ae7b..9a93d77978 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala @@ -704,7 +704,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore any completion if they are concurrent (cancel first) (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore any completion if they are concurrent (cancel first) (chasing = $chasing)" in new PortTestSetup( + chasing) { in.cancel() out.complete() diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala index 752b27a1c8..3bb77d8c20 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala @@ -449,7 +449,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } - "work with pushAndFinish if upstream completes with pushAndFinish" in new OneBoundedSetup[Int](new PushFinishStage) { + "work with pushAndFinish if upstream completes with pushAndFinish" in new OneBoundedSetup[Int]( + new PushFinishStage) { lastEvents() should be(Set.empty) diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala index 057a937be9..0588064f66 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala @@ -76,12 +76,13 @@ class KeepGoingStageSpec extends StreamSpec { } finally listener.foreach(_ ! EndOfEventHandler) } - setHandler(shape.in, new InHandler { - override def onPush(): Unit = pull(shape.in) + setHandler(shape.in, + new InHandler { + override def onPush(): Unit = pull(shape.in) - // Ignore finish - override def onUpstreamFinish(): Unit = listener.foreach(_ ! UpstreamCompleted) - }) + // Ignore finish + override def onUpstreamFinish(): Unit = listener.foreach(_ ! UpstreamCompleted) + }) override def postStop(): Unit = listener.foreach(_ ! 
PostStop) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala index 69af8eef75..a3f9cdbb60 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala @@ -64,7 +64,7 @@ class ByteStringParserSpec extends StreamSpec { def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new ParsingLogic { object ReadHeader extends ParseStep[ByteString] { def parse(reader: ByteReader): ParseResult[ByteString] = { - require(reader.readShortBE() == 0xcafe, "Magic header bytes not found") + require(reader.readShortBE() == 0xCAFE, "Magic header bytes not found") ParseResult(None, ReadData) } } @@ -82,10 +82,10 @@ class ByteStringParserSpec extends StreamSpec { Source[ByteString](data.toVector).via(MultistepParsing).fold(ByteString.empty)(_ ++ _).runWith(Sink.head), 5.seconds) - run(ByteString(0xca), ByteString(0xfe), ByteString(0xef, 0x12)) shouldEqual ByteString(0xef, 0x12) - run(ByteString(0xca), ByteString(0xfe, 0xef, 0x12)) shouldEqual ByteString(0xef, 0x12) - run(ByteString(0xca, 0xfe), ByteString(0xef, 0x12)) shouldEqual ByteString(0xef, 0x12) - run(ByteString(0xca, 0xfe, 0xef, 0x12)) shouldEqual ByteString(0xef, 0x12) + run(ByteString(0xCA), ByteString(0xFE), ByteString(0xEF, 0x12)) shouldEqual ByteString(0xEF, 0x12) + run(ByteString(0xCA), ByteString(0xFE, 0xEF, 0x12)) shouldEqual ByteString(0xEF, 0x12) + run(ByteString(0xCA, 0xFE), ByteString(0xEF, 0x12)) shouldEqual ByteString(0xEF, 0x12) + run(ByteString(0xCA, 0xFE, 0xEF, 0x12)) shouldEqual ByteString(0xEF, 0x12) } "don't spin when logic is flawed" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/DeprecatedTlsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/DeprecatedTlsSpec.scala index 6e042eac0f..b777be853c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/DeprecatedTlsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/DeprecatedTlsSpec.scala @@ -73,15 +73,17 @@ object DeprecatedTlsSpec { override def preStart(): Unit = scheduleOnce((), duration) var last: ByteString = _ - setHandler(in, new InHandler { - override def onPush(): Unit = { - last = grab(in) - push(out, last) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + last = grab(in) + push(out, last) + } + }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def onTimer(x: Any): Unit = { failStage(new TimeoutException(s"timeout expired, last element was $last")) } @@ -519,7 +521,7 @@ class DeprecatedTlsSpec extends StreamSpec(DeprecatedTlsSpec.configOverrides) wi Await.result(run("unknown.example.org"), 3.seconds) } - cause.getClass should ===(classOf[SSLHandshakeException]) //General SSLEngine problem + cause.getClass should ===(classOf[SSLHandshakeException]) // General SSLEngine problem val rootCause = rootCauseOf(cause.getCause) rootCause.getClass should ===(classOf[CertificateException]) rootCause.getMessage should ===("No name matching unknown.example.org found") diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala index 77357fc379..7fc8ee7d11 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala @@ -58,12 +58,12 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) with ScalaFutures "create new file if not exists" in { targetFile({ f => - val completion = Source(TestByteStrings).runWith(FileIO.toPath(f)) + val completion = Source(TestByteStrings).runWith(FileIO.toPath(f)) - val result = Await.result(completion, 3.seconds) - result.count should equal(6006) - checkFileContents(f, TestLines.mkString("")) - }, create = false) + val result = Await.result(completion, 3.seconds) + result.count should equal(6006) + checkFileContents(f, TestLines.mkString("")) + }, create = false) } "write into existing file without wiping existing data" in { @@ -197,7 +197,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) with ScalaFutures } "write single line to a file from lazy sink" in { - //LazySink must wait for result of initialization even if got upstreamComplete + // LazySink must wait for result of initialization even if it got upstreamComplete targetFile { f => val completion = Source(List(TestByteStrings.head)).runWith( Sink diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala index d8057ff853..2002dfa426 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala @@ -158,7 +158,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { val sinkProbe = TestProbe() val inputStream = Source[ByteString](bytes).runWith(testSink(sinkProbe)) - //need to wait while all elements arrive to sink + // need to wait while all elements arrive at the sink bytes.foreach { _ => sinkProbe.expectMsg(GraphStageMessages.Push) } @@ -175,7 +175,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { val sinkProbe = TestProbe() val inputStream = Source(bytes1 :: bytes2 :: Nil).runWith(testSink(sinkProbe)) - //need to wait while both elements arrive to sink + // need to wait while both elements arrive at the sink sinkProbe.expectMsgAllOf(GraphStageMessages.Push, GraphStageMessages.Push) readN(inputStream, 15) should ===((15, bytes1 ++ bytes2.take(5))) @@ -233,7 +233,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { itself throws an exception when being materialized. If Source.empty is used, the same exception is thrown by Materializer.
- */ + */ } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala index cc3f0a6941..60d8c6c904 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala @@ -44,12 +44,11 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { ManagementFactory.getThreadMXBean .dumpAllThreads(true, true) .toSeq - .filter( - t => - t.getThreadName.startsWith("OutputStreamSourceSpec") && - t.getLockName != null && - t.getLockName.startsWith("java.util.concurrent.locks.AbstractQueuedSynchronizer") && - t.getStackTrace.exists(s => s.getClassName.startsWith(classOf[OutputStreamSourceStage].getName))) + .filter(t => + t.getThreadName.startsWith("OutputStreamSourceSpec") && + t.getLockName != null && + t.getLockName.startsWith("java.util.concurrent.locks.AbstractQueuedSynchronizer") && + t.getStackTrace.exists(s => s.getClassName.startsWith(classOf[OutputStreamSourceStage].getName))) awaitAssert(threadsBlocked should ===(Seq()), 5.seconds, interval = 500.millis) } @@ -110,7 +109,7 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { outputStream.write(bytesArray) } - //blocked call + // blocked call val f = Future(outputStream.write(bytesArray)) expectTimeout(f, timeout) @@ -157,7 +156,7 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { itself throws an exception when being materialized. If Sink.ignore is used, the same exception is thrown by Materializer. - */ + */ } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala index 915c96d341..7f0da41bf3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala @@ -265,9 +265,9 @@ class TcpSpec extends StreamSpec(""" // Need a write on the server side to detect the close event awaitAssert({ - serverConnection.write(testData) - serverConnection.expectClosed(_.isErrorClosed, 500.millis) - }, max = 5.seconds) + serverConnection.write(testData) + serverConnection.expectClosed(_.isErrorClosed, 500.millis) + }, max = 5.seconds) serverConnection.expectTerminated() } @@ -302,9 +302,9 @@ class TcpSpec extends StreamSpec(""" // Need a write on the server side to detect the close event awaitAssert({ - serverConnection.write(testData) - serverConnection.expectClosed(_.isErrorClosed, 500.millis) - }, max = 5.seconds) + serverConnection.write(testData) + serverConnection.expectClosed(_.isErrorClosed, 500.millis) + }, max = 5.seconds) serverConnection.expectTerminated() } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala index 0ba32018fd..1922ca1af3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala @@ -72,15 +72,17 @@ object TlsSpec { override def preStart(): Unit = scheduleOnce((), duration) var last: ByteString = _ - setHandler(in, new InHandler { - override def onPush(): Unit = { - last = grab(in) - push(out, last) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + last = grab(in) + push(out, last) + } + }) + setHandler(out, + 
new OutHandler { + override def onPull(): Unit = pull(in) + }) override def onTimer(x: Any): Unit = { failStage(new TimeoutException(s"timeout expired, last element was $last")) } @@ -476,7 +478,7 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing Sink.head[ByteString], Sink.head[SslTlsInbound])((_, _, _)) { implicit b => (s, o1, o2) => val tls = b.add(clientTls(EagerClose)) - s ~> tls.in1 + s ~> tls.in1 tls.out1 ~> o1 o2 <~ tls.out2 tls.in2 <~ Source.failed(ex) @@ -500,7 +502,7 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing (_, _, _)) { implicit b => (s, o1, o2) => val tls = b.add(clientTls(EagerClose)) Source.failed[SslTlsOutbound](ex) ~> tls.in1 - tls.out1 ~> o1 + tls.out1 ~> o1 o2 <~ tls.out2 tls.in2 <~ s ClosedShape @@ -567,7 +569,7 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing Await.result(run("unknown.example.org"), 3.seconds) } - cause.getClass should ===(classOf[SSLHandshakeException]) //General SSLEngine problem + cause.getClass should ===(classOf[SSLHandshakeException]) // General SSLEngine problem val rootCause = rootCauseOf(cause.getCause) rootCause.getClass should ===(classOf[CertificateException]) diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala index 3ef5881aeb..fdaea97f17 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala @@ -90,8 +90,8 @@ abstract class CoderSpec(codecName: String) extends AnyWordSpec with CodecSpecSu val chunks = largeTextBytes.grouped(512).toVector val comp = newCompressor() val compressedChunks = chunks.map { chunk => - comp.compressAndFlush(chunk) - } :+ comp.finish() + comp.compressAndFlush(chunk) + } :+ comp.finish() val uncompressed = decodeFromIterator(() => compressedChunks.iterator) uncompressed should readAs(largeText) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala index 253e922dca..f2877934e2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala @@ -25,9 +25,9 @@ class AggregateWithBoundarySpec extends StreamSpec { val groupSize = 3 val result = Source(stream) .aggregateWithBoundary(allocate = () => ListBuffer.empty[Int])(aggregate = (buffer, i) => { - buffer += i - (buffer, buffer.size >= groupSize) - }, harvest = buffer => buffer.toSeq, emitOnTimer = None) + buffer += i + (buffer, buffer.size >= groupSize) + }, harvest = buffer => buffer.toSeq, emitOnTimer = None) .runWith(Sink.collection) Await.result(result, 10.seconds) should be(stream.grouped(groupSize).toSeq) @@ -57,9 +57,9 @@ class AggregateWithBoundarySpec extends StreamSpec { val result = Source(stream) .aggregateWithBoundary(allocate = () => ListBuffer.empty[Int])(aggregate = (buffer, i) => { - buffer += i - (buffer, buffer.sum >= weight) - }, harvest = buffer => buffer.toSeq, emitOnTimer = None) + buffer += i + (buffer, buffer.sum >= weight) + }, harvest = buffer => buffer.toSeq, emitOnTimer = None) .runWith(Sink.collection) Await.result(result, 10.seconds) should be(Seq(Seq(1, 2, 3, 4), Seq(5, 6), Seq(7))) @@ -121,16 +121,17 @@ class 
AggregateWithTimeBoundaryAndSimulatedTimeSpec extends AnyWordSpecLike with } source.aggregateWithBoundary(allocate = () => new ValueTimeWrapper(value = allocate))(aggregate = (agg, in) => { - agg.updateTime(currentTimeMs) - // user provided Agg type must be mutable - val (updated, result) = aggregate(agg.value, in) - agg.value = updated - (agg, result) - }, harvest = agg => harvest(agg.value), emitOnTimer = Some((agg => { - val currentTime = currentTimeMs - maxDuration.exists(md => currentTime - agg.firstTime >= md.toMillis) || - maxGap.exists(mg => currentTime - agg.lastTime >= mg.toMillis) - }, interval))) + agg.updateTime(currentTimeMs) + // user provided Agg type must be mutable + val (updated, result) = aggregate(agg.value, in) + agg.value = updated + (agg, result) + }, harvest = agg => harvest(agg.value), + emitOnTimer = Some((agg => { + val currentTime = currentTimeMs + maxDuration.exists(md => currentTime - agg.firstTime >= md.toMillis) || + maxGap.exists(mg => currentTime - agg.lastTime >= mg.toMillis) + }, interval))) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala index 8c508ed4f1..7f3a09562e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala @@ -30,9 +30,10 @@ object AttributesSpec { override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Attributes) = { val logic = new GraphStageLogic(shape) { - setHandler(out, new OutHandler { - def onPull(): Unit = {} - }) + setHandler(out, + new OutHandler { + def onPull(): Unit = {} + }) } (logic, inheritedAttributes) } @@ -50,10 +51,11 @@ object AttributesSpec { override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Attributes) = { val logic = new GraphStageLogic(shape) { - setHandlers(in, out, new InHandler with OutHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onPull(): Unit = pull(in) - }) + setHandlers(in, out, + new InHandler with OutHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onPull(): Unit = pull(in) + }) } (logic, inheritedAttributes) @@ -72,12 +74,13 @@ object AttributesSpec { override def preStart(): Unit = { pull(in) } - setHandler(in, new InHandler { - override def onPush(): Unit = { - grab(in) - pull(in) - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + grab(in) + pull(in) + } + }) } (logic, inheritedAttributes) @@ -91,12 +94,13 @@ object AttributesSpec { override protected def initialAttributes: Attributes = initialDispatcher.fold(Attributes.none)(name => ActorAttributes.dispatcher(name)) def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(out, new OutHandler { - def onPull(): Unit = { - push(out, Thread.currentThread.getName) - completeStage() - } - }) + setHandler(out, + new OutHandler { + def onPull(): Unit = { + push(out, Thread.currentThread.getName) + completeStage() + } + }) } } @@ -365,7 +369,7 @@ class AttributesSpec val streamSnapshot = awaitAssert { val snapshot = MaterializerState.streamSnapshots(materializer).futureValue - snapshot should have size (1) // just the one island in this case + snapshot should have size 1 // just the one island in this case snapshot.head } @@ -443,7 +447,7 @@ class AttributesSpec val snapshot = awaitAssert { val 
snapshot = MaterializerState.streamSnapshots(materializer).futureValue - snapshot should have size (2) // two stream "islands", one on blocking dispatcher and one on default + snapshot should have size 2 // two stream "islands", one on blocking dispatcher and one on default snapshot } @@ -478,7 +482,7 @@ class AttributesSpec val snapshot = awaitAssert { val snapshot = MaterializerState.streamSnapshots(system).futureValue - snapshot should have size (2) // two stream "islands", one on blocking dispatcher and one on default + snapshot should have size 2 // two stream "islands", one on blocking dispatcher and one on default snapshot } @@ -512,7 +516,7 @@ class AttributesSpec val snapshot = awaitAssert { val snapshot = MaterializerState.streamSnapshots(system).futureValue - snapshot should have size (2) // two stream "islands", one on blocking dispatcher and one on default + snapshot should have size 2 // two stream "islands", one on blocking dispatcher and one on default snapshot } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala index 42f67e1aed..9e0dd69ef7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala @@ -95,9 +95,9 @@ class BidiFlowSpec extends StreamSpec { val bcast = b.add(Broadcast[Int](2)) val merge = b.add(Merge[Int](2)) val flow = b.add(Flow[String].map(Integer.valueOf(_).toInt)) - bcast ~> sink + bcast ~> sink Source.single(1) ~> bcast ~> merge - flow ~> merge + flow ~> merge FlowShape(flow.in, merge.out) }) val right = Flow.fromGraph(GraphDSL.createGraph(Sink.head[immutable.Seq[Long]]) { implicit b => sink => diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala index 1bba7095bc..a4b8a0a6c0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala @@ -174,8 +174,8 @@ class CancellationStrategySpec extends StreamSpec("""akka.loglevel = DEBUG val fanOut = b.add(new TestFanOut) Source.fromPublisher(inProbe) ~> fanOut.in - fanOut.out(0) ~> Sink.fromSubscriber(out1Probe) - fanOut.out(1) ~> Sink.fromSubscriber(out2Probe) + fanOut.out(0) ~> Sink.fromSubscriber(out1Probe) + fanOut.out(1) ~> Sink.fromSubscriber(out2Probe) ClosedShape } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala index 806124eebd..fb83e99362 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala @@ -20,7 +20,7 @@ class CollectionSinkSpec extends StreamSpec(""" "Sink.collection" when { "using Seq as Collection" must { "return a Seq[T] from a Source" in { - val input = (1 to 6) + val input = 1 to 6 val future: Future[immutable.Seq[Int]] = Source(input).runWith(Sink.collection) val result: immutable.Seq[Int] = Await.result(future, remainingOrDefault) result should be(input.toSeq) @@ -43,7 +43,7 @@ class CollectionSinkSpec extends StreamSpec(""" } "using Vector as Collection" must { "return a Vector[T] from a Source" in { - val input = (1 to 6) + val input = 1 to 6 val future: 
Future[immutable.Vector[Int]] = Source(input).runWith(Sink.collection) val result: immutable.Vector[Int] = Await.result(future, remainingOrDefault) result should be(input.toVector) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala index 62b69c3b1d..2300153702 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala @@ -99,15 +99,16 @@ class CoupledTerminationFlowSpec extends StreamSpec(""" "cancel in:Sink => cancel out:Source" in { val probe = TestProbe() - val f = Flow.fromSinkAndSourceCoupledMat(Sink.cancelled, Source.fromPublisher(new Publisher[String] { - override def subscribe(subscriber: Subscriber[_ >: String]): Unit = { - subscriber.onSubscribe(new Subscription { - override def cancel(): Unit = probe.ref ! "cancelled" + val f = Flow.fromSinkAndSourceCoupledMat(Sink.cancelled, + Source.fromPublisher(new Publisher[String] { + override def subscribe(subscriber: Subscriber[_ >: String]): Unit = { + subscriber.onSubscribe(new Subscription { + override def cancel(): Unit = probe.ref ! "cancelled" - override def request(l: Long): Unit = () // do nothing - }) - } - }))(Keep.none) // completes right away, should complete the sink as well + override def request(l: Long): Unit = () // do nothing + }) + } + }))(Keep.none) // completes right away, should complete the sink as well f.runWith(Source.maybe, Sink.ignore) // these do nothing. diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala index 8e099a23fe..d49855255a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala @@ -80,11 +80,11 @@ class FlowConcatAllLazySpec extends StreamSpec(""" .expectNext(1, 2) .cancel() .expectNoMessage() - materialized.get() shouldBe (false) + materialized.get() shouldBe false } "work in example" in { - //#concatAllLazy + // #concatAllLazy val sourceA = Source(List(1, 2, 3)) val sourceB = Source(List(4, 5, 6)) val sourceC = Source(List(7, 8, 9)) @@ -92,8 +92,8 @@ class FlowConcatAllLazySpec extends StreamSpec(""" .concatAllLazy(sourceB, sourceC) .fold(new StringJoiner(","))((joiner, input) => joiner.add(String.valueOf(input))) .runWith(Sink.foreach(println)) - //prints 1,2,3,4,5,6,7,8,9 - //#concatAllLazy + // prints 1,2,3,4,5,6,7,8,9 + // #concatAllLazy } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala index 9e9a6cdaf9..979c12f12c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala @@ -157,7 +157,7 @@ abstract class AbstractFlowConcatSpec extends BaseTwoStreamsSetup { m1.isInstanceOf[NotUsed] should be(true) m2.isInstanceOf[NotUsed] should be(true) - runnable.mapMaterializedValue((_) => "boo").run() should be("boo") + runnable.mapMaterializedValue(_ => "boo").run() should be("boo") } "work with Flow DSL" in { @@ -174,7 +174,7 @@ abstract class AbstractFlowConcatSpec extends BaseTwoStreamsSetup { m2.isInstanceOf[NotUsed] should be(true) m3.isInstanceOf[NotUsed] 
should be(true) - runnable.mapMaterializedValue((_) => "boo").run() should be("boo") + runnable.mapMaterializedValue(_ => "boo").run() should be("boo") } "work with Flow DSL2" in { @@ -244,14 +244,14 @@ class FlowConcatSpec extends AbstractFlowConcatSpec with ScalaFutures { "concat" must { "work in example" in { - //#concat + // #concat val sourceA = Source(List(1, 2, 3, 4)) val sourceB = Source(List(10, 20, 30, 40)) sourceA.concat(sourceB).runWith(Sink.foreach(println)) - //prints 1, 2, 3, 4, 10, 20, 30, 40 - //#concat + // prints 1, 2, 3, 4, 10, 20, 30, 40 + // #concat } } } @@ -282,12 +282,12 @@ class FlowConcatLazySpec extends AbstractFlowConcatSpec { } "work in example" in { - //#concatLazy + // #concatLazy val sourceA = Source(List(1, 2, 3, 4)) val sourceB = Source(List(10, 20, 30, 40)) sourceA.concatLazy(sourceB).runWith(Sink.foreach(println)) - //#concatLazy + // #concatLazy } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala index 18eb228f9a..4fc6879829 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala @@ -48,12 +48,12 @@ class FlowDelaySpec extends StreamSpec { .delay(300.millis) .runWith(TestSink.probe[Int]) .request(2) - .expectNoMessage(200.millis) //delay - .expectNext(200.millis, 1) //delayed element - .expectNext(100.millis, 2) //buffered element + .expectNoMessage(200.millis) // delay + .expectNext(200.millis, 1) // delayed element + .expectNext(100.millis, 2) // buffered element .expectNoMessage(200.millis) .request(1) - .expectNext(3) //buffered element + .expectNext(3) // buffered element .expectComplete() } @@ -167,7 +167,7 @@ class FlowDelaySpec extends StreamSpec { c.expectNoMessage(300.millis) pSub.sendNext(17) c.expectNext(100.millis, 1) - //fail will terminate despite of non empty internal buffer + // fail will terminate despite a non-empty internal buffer pSub.sendError(new RuntimeException() with NoStackTrace) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala index 959d49e5d7..e8c2564d36 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala @@ -190,7 +190,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { downstream.request(1000) upstream.expectRequest() - //completing publisher + // completing publisher upstream.sendComplete() matValue.futureValue should ===(Nil) @@ -299,7 +299,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { // inner stream was materialized innerMatVal.futureValue should ===(NotUsed) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subDownstream.request(1) subscriber.expectNext(2) subUpstream.sendNext(22) @@ -335,16 +335,16 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { val subDownstream = subscriber.expectSubscription() val subUpstream = publisher.expectSubscription() subDownstream.request(1) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subUpstream.sendNext(0) subUpstream.sendNext(1) - //subflow not materialized yet, hence mat value (future) isn't ready yet + // subflow not
materialized yet, hence mat value (future) isn't ready yet matFlowWatchTerm.value should be(empty) if (delayDownstreamCancellation) { srcWatchTermF.value should be(empty) - //this one is sent AFTER downstream cancellation + // this one is sent AFTER downstream cancellation subUpstream.sendNext(2) subDownstream.cancel() @@ -381,7 +381,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { val subDownstream = subscriber.expectSubscription() val subUpstream = publisher.expectSubscription() subDownstream.request(1) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subUpstream.sendNext(0) subUpstream.sendNext(1) subDownstream.asInstanceOf[SubscriptionWithCancelException].cancel(TE("that again?!")) @@ -431,7 +431,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { notUsedF.futureValue should ===(NotUsed) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subDownstream.request(1) subscriber.expectNext(2) subUpstream.sendNext(2) @@ -491,9 +491,9 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { .withAttributes(attributes) .run()(mat) val countF = countFF.futureValue - //at this point we know the flow was materialized, now we can stop the materializer + // at this point we know the flow was materialized, now we can stop the materializer mat.shutdown() - //expect the nested flow to be terminated abruptly. + // expect the nested flow to be terminated abruptly. countF.failed.futureValue should be(a[AbruptStageTerminationException]) } @@ -579,12 +579,12 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { subscriber.expectNoMessage() subsc.sendNext(1) val sinkSubscription = subscriber.expectSubscription() - //this indicates + // this indicates fHeadOpt.futureValue should be(empty) - //materialize flow immediately cancels upstream + // materializing the flow immediately cancels upstream subsc.expectCancellation() - //at this point both ends of the 'external' fow are closed + // at this point both ends of the 'external' flow are closed sinkSubscription.request(10) subscriber.expectNext("a", "b", "c") @@ -602,11 +602,11 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { log.debug("closing sink") closeSink() log.debug("sink closed") - //closing the sink before returning means that it's higly probably - //for the flatMapPrefix stage to receive the downstream cancellation before the actor graph interpreter - //gets a chance to complete the new interpreter shell's registration. - //this in turn exposes a bug in the actor graph interpreter when all active flows complete - //but there are pending new interpreter shells to be registered. + // closing the sink before returning means that it's highly probable + // for the flatMapPrefix stage to receive the downstream cancellation before the actor graph interpreter + // gets a chance to complete the new interpreter shell's registration. + // this in turn exposes a bug in the actor graph interpreter when all active flows complete + // but there are pending new interpreter shells to be registered.
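The operator exercised throughout FlowFlatMapPrefixSpec, flatMapPrefix, consumes the first n elements of a stream and uses them to build the Flow that processes the remainder. A minimal, self-contained sketch of that behaviour (the object name, element values and scaling logic here are illustrative assumptions, not taken from the spec):

import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Sink, Source }

object FlatMapPrefixSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")

  // The first two elements form a "header"; the Flow returned from the
  // function processes only the remaining elements (the prefix is consumed).
  val result = Source(1 to 6)
    .flatMapPrefix(2) { prefix =>
      val factor = prefix.sum // prefix == Seq(1, 2), so factor == 3
      Flow[Int].map(_ * factor)
    }
    .runWith(Sink.seq)

  println(Await.result(result, 3.seconds)) // Vector(9, 12, 15, 18)
  system.terminate()
}

Note that the prefix is not re-emitted automatically; tests that need it back downstream prepend it explicitly, as the next hunk does with Flow[Int].prepend(Source(seq)).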
Flow[Int].prepend(Source(seq)) }(Keep.right) .toMat(Sink.queue(10))(Keep.both) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala index 96aba2769f..edfe548535 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala @@ -139,12 +139,13 @@ class FlowFlattenMergeSpec extends StreamSpec { val ex = new Exception("buh") val latch = TestLatch() Source(1 to 3) - .flatMapMerge(10, { - case 1 => Source.fromPublisher(p) - case 2 => - Await.ready(latch, 3.seconds) - throw ex - }) + .flatMapMerge(10, + { + case 1 => Source.fromPublisher(p) + case 2 => + Await.ready(latch, 3.seconds) + throw ex + }) .toMat(Sink.head)(Keep.right) .withAttributes(ActorAttributes.syncProcessingLimit(1) and Attributes.inputBuffer(1, 1)) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala index c40233b465..9dd64176af 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala @@ -19,9 +19,9 @@ class FlowFutureFlowSpec extends StreamSpec { case x => x } - //this stage's behaviour in case of an 'early' downstream cancellation is governed by an attribute - //so we run all tests cases using both modes of the attributes. - //please notice most of the cases don't exhibit any difference in behaviour between the two modes + // this stage's behaviour in case of an 'early' downstream cancellation is governed by an attribute + // so we run all test cases using both modes of the attribute.
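FlowFutureFlowSpec exercises Flow.futureFlow, which materializes a nested flow once a Future[Flow] completes; the attribute named in the comment above selects between its two early-cancellation modes. A hypothetical wiring sketch (only EagerCancellation is named in these hunks; the object name, values, and the use of Attributes(...) and Sink.fold are illustrative assumptions):

import scala.concurrent.Promise
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.Attributes
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source }

object FutureFlowSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")
  import system.dispatcher

  // a Flow that only becomes available asynchronously, e.g. after a handshake
  val flowPromise = Promise[Flow[Int, Int, NotUsed]]()

  val (nestedMatF, sumF) = Source(1 to 3)
    .viaMat(Flow.futureFlow(flowPromise.future))(Keep.right)
    // the attribute under test: it governs what happens if downstream
    // cancels while flowPromise is still pending
    .addAttributes(Attributes(Attributes.NestedMaterializationCancellationPolicy.EagerCancellation))
    .toMat(Sink.fold(0)(_ + _))(Keep.both)
    .run()

  flowPromise.success(Flow[Int].map(_ * 2)) // the nested flow materializes now
  nestedMatF.foreach(_ => println("nested flow materialized")) // Future[NotUsed]
  sumF.foreach { sum =>
    println(sum) // 12, i.e. (1 + 2 + 3) * 2
    system.terminate()
  }
}

Under EagerCancellation, a downstream cancellation that arrives while the future is still pending cancels the stream immediately, rather than first awaiting and materializing the nested flow.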
+ // please notice most of the cases don't exhibit any difference in behaviour between the two modes for { (att, name) <- List( (Attributes.NestedMaterializationCancellationPolicy.EagerCancellation, "EagerCancellation"), diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala index 94446160ad..075d039700 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala @@ -146,7 +146,7 @@ class FlowGroupBySpec extends StreamSpec(""" .runWith(TestSink.probe[Seq[String]]) down.request(1) val ex = down.expectError() - ex.getMessage.indexOf("Key cannot be null") should not be (-1) + ex.getMessage.indexOf("Key cannot be null") should not be -1 ex.isInstanceOf[IllegalArgumentException] should be(true) } @@ -585,7 +585,7 @@ class FlowGroupBySpec extends StreamSpec(""" state.probe.request(1) - //need to verify elements that are first element in subFlow or is in nextElement buffer before + // need to verify elements that are the first element in a subFlow or are in the nextElement buffer before // pushing next element from upstream if (state.firstElement != null) { state.probe.expectNext() should ===(state.firstElement) @@ -622,10 +622,10 @@ class FlowGroupBySpec extends StreamSpec(""" val probe: TestSubscriber.Probe[ByteString] = Await.result(probes(probeIndex).future, 300.millis) probeIndex += 1 map.put(index, SubFlowState(probe, false, byteString)) - //stream automatically requests next element + // stream automatically requests next element } else { val state = map(index) - if (state.firstElement != null) { //first element in subFlow + if (state.firstElement != null) { // first element in subFlow if (!state.hasDemand) blockingNextElement = byteString randomDemand() } else if (state.hasDemand) { @@ -665,14 +665,17 @@ class FlowGroupBySpec extends StreamSpec(""" val threeProcessed = Promise[Done]() val blockSubStream1 = TestLatch() - List(Elem(1, 1, () => { - // timeout just to not wait forever if something is wrong, not really relevant for test - Await.result(blockSubStream1, 10.seconds) - 1 - }), Elem(2, 1, () => 2), Elem(3, 2, () => { - threeProcessed.success(Done) - 3 - })).foreach(queue.offer) + List(Elem(1, 1, + () => { + // timeout just to not wait forever if something is wrong, not really relevant for test + Await.result(blockSubStream1, 10.seconds) + 1 + }), Elem(2, 1, () => 2), + Elem(3, 2, + () => { + threeProcessed.success(Done) + 3 + })).foreach(queue.offer) // two and three are processed as fast as possible, not blocked by substream 1 being clogged threeProcessed.future.futureValue should ===(Done) // let 1 pass so stream can complete diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala index 8eaeac8bb3..d246d52768 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala @@ -26,7 +26,7 @@ class FlowGroupedWeightedSpec extends StreamSpec(""" } "always exhaust a source into a single group if cost is 0" in { - val input = (1 to 15) + val input = 1 to 15 def costFn(@unused e: Int): Long = 0L val minWeight = 1 // chose the least possible value for minWeight val future =
Source(input).groupedWeighted(minWeight)(costFn).runWith(Sink.seq) @@ -35,7 +35,7 @@ class FlowGroupedWeightedSpec extends StreamSpec(""" } "exhaust source into one group if minWeight equals the accumulated cost of the source" in { - val input = (1 to 16) + val input = 1 to 16 def costFn(@unused e: Int): Long = 1L val minWeight = input.length val future = Source(input).groupedWeighted(minWeight)(costFn).runWith(Sink.seq) @@ -76,17 +76,17 @@ class FlowGroupedWeightedSpec extends StreamSpec(""" "fail during stream initialization when minWeight is negative" in { val ex = the[IllegalArgumentException] thrownBy Source(1 to 5) - .groupedWeighted(-1)(_ => 1L) - .to(Sink.collection) - .run() + .groupedWeighted(-1)(_ => 1L) + .to(Sink.collection) + .run() ex.getMessage should be("requirement failed: minWeight must be greater than 0") } "fail during stream initialization when minWeight is 0" in { val ex = the[IllegalArgumentException] thrownBy Source(1 to 5) - .groupedWeighted(0)(_ => 1L) - .to(Sink.collection) - .run() + .groupedWeighted(0)(_ => 1L) + .to(Sink.collection) + .run() ex.getMessage should be("requirement failed: minWeight must be greater than 0") } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala index 70078e47c1..4654b0dda0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala @@ -25,7 +25,8 @@ class FlowIdleInjectSpec extends StreamSpec(""" "emit elements periodically after silent periods" in { val sourceWithIdleGap = Source(1 to 5) ++ Source(6 to 10).initialDelay(2.second) - Await.result(sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head), 3.seconds) should ===( + Await.result(sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head), + 3.seconds) should ===( List(1, 2, 3, 4, 5, 0, 0, 0, 6, 7, 8, 9, 10)) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala index 63057a625d..c764366cd5 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala @@ -100,7 +100,7 @@ class FlowInterleaveAllSpec extends StreamSpec(""" } "work in example" in { - //#interleaveAll + // #interleaveAll val sourceA = Source(List(1, 2, 7, 8)) val sourceB = Source(List(3, 4, 9)) val sourceC = Source(List(5, 6)) @@ -109,8 +109,8 @@ class FlowInterleaveAllSpec extends StreamSpec(""" .interleaveAll(List(sourceB, sourceC), 2, eagerClose = false) .fold(new StringJoiner(","))((joiner, input) => joiner.add(String.valueOf(input))) .runWith(Sink.foreach(println)) - //prints 1,2,3,4,5,6,7,8,9 - //#interleaveAll + // prints 1,2,3,4,5,6,7,8,9 + // #interleaveAll } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala index cc3b13be2a..822e462893 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala @@ -240,7 +240,7 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { } "work in example" in { - //#interleave + // #interleave import 
akka.stream.scaladsl.Sink import akka.stream.scaladsl.Source @@ -248,8 +248,8 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { val sourceB = Source(List(10, 20, 30, 40)) sourceA.interleave(sourceB, segmentSize = 2).runWith(Sink.foreach(println)) - //prints 1, 2, 10, 20, 3, 4, 30, 40 - //#interleave + // prints 1, 2, 10, 20, 3, 4, 30, 40 + // #interleave } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala index 64dcd95cf4..193a6e3d8d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala @@ -32,8 +32,8 @@ class FlowJoinSpec extends StreamSpec(""" import GraphDSL.Implicits._ val merge = b.add(Merge[Int](2)) val broadcast = b.add(Broadcast[Int](2)) - source ~> merge.in(0) - merge.out ~> broadcast.in + source ~> merge.in(0) + merge.out ~> broadcast.in broadcast.out(0).grouped(1000) ~> Sink.fromSubscriber(probe) FlowShape(merge.in(1), broadcast.out(1)) @@ -60,8 +60,8 @@ class FlowJoinSpec extends StreamSpec(""" import GraphDSL.Implicits._ val merge = b.add(Merge[String](2)) val broadcast = b.add(Broadcast[String](2, eagerCancel = true)) - source ~> merge.in(0) - merge.out ~> broadcast.in + source ~> merge.in(0) + merge.out ~> broadcast.in broadcast.out(0) ~> sink FlowShape(merge.in(1), broadcast.out(1)) @@ -77,8 +77,8 @@ class FlowJoinSpec extends StreamSpec(""" import GraphDSL.Implicits._ val merge = b.add(MergePreferred[String](1)) val broadcast = b.add(Broadcast[String](2, eagerCancel = true)) - source ~> merge.preferred - merge.out ~> broadcast.in + source ~> merge.preferred + merge.out ~> broadcast.in broadcast.out(0) ~> sink FlowShape(merge.in(0), broadcast.out(1)) @@ -94,8 +94,8 @@ class FlowJoinSpec extends StreamSpec(""" import GraphDSL.Implicits._ val zip = b.add(Zip[String, String]()) val broadcast = b.add(Broadcast[(String, String)](2)) - source ~> zip.in0 - zip.out ~> broadcast.in + source ~> zip.in0 + zip.out ~> broadcast.in broadcast.out(0) ~> sink FlowShape(zip.in1, broadcast.out(1)) @@ -107,7 +107,7 @@ class FlowJoinSpec extends StreamSpec(""" val merge = b.add(Merge[String](2)) ignition ~> merge.in(0) - flow ~> merge.in(1) + flow ~> merge.in(1) FlowShape(flow.in, merge.out) }) @@ -123,8 +123,8 @@ class FlowJoinSpec extends StreamSpec(""" import GraphDSL.Implicits._ val concat = b.add(Concat[String](2)) val broadcast = b.add(Broadcast[String](2, eagerCancel = true)) - source ~> concat.in(0) - concat.out ~> broadcast.in + source ~> concat.in(0) + concat.out ~> broadcast.in broadcast.out(0) ~> sink FlowShape(concat.in(1), broadcast.out(1)) @@ -145,8 +145,8 @@ class FlowJoinSpec extends StreamSpec(""" import GraphDSL.Implicits._ val merge = b.add(Interleave[String](2, 1)) val broadcast = b.add(Broadcast[String](2, eagerCancel = true)) - source ~> merge.in(0) - merge.out ~> broadcast.in + source ~> merge.in(0) + merge.out ~> broadcast.in broadcast.out(0) ~> sink FlowShape(merge.in(1), broadcast.out(1)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala index 88dd76a29a..38673a4d49 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala @@ -23,7 +23,7 @@ class FlowLimitSpec extends StreamSpec(""" } "produce output that is identical to 
the input when n = input.length" in { - val input = (1 to 6) + val input = 1 to 6 val n = input.length val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) val result = Await.result(future, remainingOrDefault) @@ -31,7 +31,7 @@ class FlowLimitSpec extends StreamSpec(""" } "produce output that is identical to the input when n > input.length" in { - val input = (1 to 6) + val input = 1 to 6 val n = input.length + 2 // n > input.length val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) val result = Await.result(future, remainingOrDefault) @@ -40,7 +40,7 @@ class FlowLimitSpec extends StreamSpec(""" "produce n messages before throwing a StreamLimitReachedException when n < input.size" in { // TODO: check if it actually produces n messages - val input = (1 to 6) + val input = 1 to 6 val n = input.length - 2 // n < input.length val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) @@ -51,7 +51,7 @@ class FlowLimitSpec extends StreamSpec(""" } "throw a StreamLimitReachedException when n < 0" in { - val input = (1 to 6) + val input = 1 to 6 val n = -1 val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala index 43648810db..e2f770d31a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala @@ -25,7 +25,7 @@ class FlowLimitWeightedSpec extends StreamSpec(""" } "always exhaust a source regardless of n (as long as n > 0) if cost is 0" in { - val input = (1 to 15) + val input = 1 to 15 def costFn(@unused e: Int): Long = 0L val n = 1 // must not matter since costFn always evaluates to 0 val future = Source(input).limitWeighted(n)(costFn).grouped(Integer.MAX_VALUE).runWith(Sink.head) @@ -34,7 +34,7 @@ class FlowLimitWeightedSpec extends StreamSpec(""" } "exhaust source if n equals to input length and cost is 1" in { - val input = (1 to 16) + val input = 1 to 16 def costFn(@unused e: Int): Long = 1L val n = input.length val future = Source(input).limitWeighted(n)(costFn).grouped(Integer.MAX_VALUE).runWith(Sink.head) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala index 19ad45d73c..0b33324b58 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala @@ -64,12 +64,14 @@ class FlowLogWithMarkerSpec extends StreamSpec(""" val debugging: javadsl.Flow[Integer, Integer, NotUsed] = javadsl.Flow .of(classOf[Integer]) .logWithMarker("log-1", _ => LogMarker("marker-1")) - .logWithMarker("log-2", _ => LogMarker("marker-2"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }) - .logWithMarker("log-3", _ => LogMarker("marker-3"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }, log) + .logWithMarker("log-2", _ => LogMarker("marker-2"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = i + }) + .logWithMarker("log-3", _ => LogMarker("marker-3"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = i + }, log) .logWithMarker("log-4", 
_ => LogMarker("marker-4"), log) javadsl.Source.single[Integer](1).via(debugging).runWith(javadsl.Sink.ignore[Integer](), system) @@ -168,12 +170,14 @@ class FlowLogWithMarkerSpec extends StreamSpec(""" javadsl.Source .single[Integer](1) .logWithMarker("log-1", _ => LogMarker("marker-1")) - .logWithMarker("log-2", _ => LogMarker("marker-2"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }) - .logWithMarker("log-3", _ => LogMarker("marker-3"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }, log) + .logWithMarker("log-2", _ => LogMarker("marker-2"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = i + }) + .logWithMarker("log-3", _ => LogMarker("marker-3"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = i + }, log) .logWithMarker("log-4", _ => LogMarker("marker-4"), log) .runWith(javadsl.Sink.ignore[Integer](), system) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala index 4d6d216072..1006036ee1 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala @@ -49,14 +49,13 @@ class FlowMapAsyncSpec extends StreamSpec { val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher Source(1 to 50) - .mapAsync(4)( - n => - if (n % 3 == 0) Future.successful(n) - else - Future { - Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) - n - }) + .mapAsync(4)(n => + if (n % 3 == 0) Future.successful(n) + else + Future { + Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) + n + }) .to(Sink.fromSubscriber(c)) .run() val sub = c.expectSubscription() @@ -98,14 +97,13 @@ class FlowMapAsyncSpec extends StreamSpec { val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher Source(1 to 5) - .mapAsync(4)( - n => - if (n == 3) Future.failed[Int](new TE("err1")) - else - Future { - Await.ready(latch, 10.seconds) - n - }) + .mapAsync(4)(n => + if (n == 3) Future.failed[Int](new TE("err1")) + else + Future { + Await.ready(latch, 10.seconds) + n + }) .to(Sink.fromSubscriber(c)) .run() val sub = c.expectSubscription() @@ -437,17 +435,18 @@ class FlowMapAsyncSpec extends StreamSpec { val delay = 50000 // nanoseconds var count = 0 @tailrec final override def run(): Unit = { - val cont = try { - val (promise, enqueued) = queue.take() - val wakeup = enqueued + delay - while (System.nanoTime() < wakeup) {} - counter.decrementAndGet() - promise.success(count) - count += 1 - true - } catch { - case _: InterruptedException => false - } + val cont = + try { + val (promise, enqueued) = queue.take() + val wakeup = enqueued + delay + while (System.nanoTime() < wakeup) {} + counter.decrementAndGet() + promise.success(count) + count += 1 + true + } catch { + case _: InterruptedException => false + } if (cont) run() } } @@ -516,13 +515,12 @@ class FlowMapAsyncSpec extends StreamSpec { import system.dispatcher val failCount = new AtomicInteger(0) val result = Source(List(true, false)) - .mapAsync(1)( - elem => - if (elem) throw TE("this has gone too far") - else - Future { - elem - }) + .mapAsync(1)(elem => + if (elem) throw TE("this has gone too far") + else + Future { + elem + }) .addAttributes(supervisionStrategy { case TE("this has gone too far") => failCount.incrementAndGet() diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala index d7db3c549d..24aef4c697 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala @@ -320,17 +320,18 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { val delay = 50000 // nanoseconds var count = 0 @tailrec final override def run(): Unit = { - val cont = try { - val (promise, enqueued) = queue.take() - val wakeup = enqueued + delay - while (System.nanoTime() < wakeup) {} - counter.decrementAndGet() - promise.success(count) - count += 1 - true - } catch { - case _: InterruptedException => false - } + val cont = + try { + val (promise, enqueued) = queue.take() + val wakeup = enqueued + delay + while (System.nanoTime() < wakeup) {} + counter.decrementAndGet() + promise.success(count) + count += 1 + true + } catch { + case _: InterruptedException => false + } if (cont) run() } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala index 0baa32bf19..0f4d0920c2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala @@ -130,7 +130,7 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { } "works in number example for mergePreferred" in { - //#mergePreferred + // #mergePreferred import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -141,11 +141,11 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { sourceA.mergePreferred(sourceB, true).runWith(Sink.foreach(println)) // prints 10, 1, ... since both sources have their first element ready and the right source is preferred - //#mergePreferred + // #mergePreferred } "works in number example for mergePrioritized" in { - //#mergePrioritized + // #mergePrioritized import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -155,11 +155,11 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { // prints e.g. 1, 10, 2, 3, 4, 20, 30, 40 since both sources have their first element ready and the left source // has higher priority – if both sources have elements ready, sourceA has a 99% chance of being picked next // while sourceB has a 1% chance - //#mergePrioritized + // #mergePrioritized } "works in number example for mergePrioritizedN" in { - //#mergePrioritizedN + // #mergePrioritizedN import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -172,28 +172,28 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { // prints e.g. 
1, 100, 2, 3, 4, 10, 20, 30, 40, 200, 300, 400 since both sources have their first element ready and // the left sourceA has higher priority - if both sources have elements ready, sourceA has a 99% chance of being picked next // while sourceB has a 0.99% chance and sourceC has a 0.01% chance - //#mergePrioritizedN + // #mergePrioritizedN } "works in number example for merge sorted" in { - //#merge-sorted + // #merge-sorted import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 3, 5, 7)) val sourceB = Source(List(2, 4, 6, 8)) sourceA.mergeSorted(sourceB).runWith(Sink.foreach(println)) - //prints 1, 2, 3, 4, 5, 6, 7, 8 + // prints 1, 2, 3, 4, 5, 6, 7, 8 val sourceC = Source(List(20, 1, 1, 1)) sourceA.mergeSorted(sourceC).runWith(Sink.foreach(println)) - //prints 1, 3, 5, 7, 20, 1, 1, 1 - //#merge-sorted + // prints 1, 3, 5, 7, 20, 1, 1, 1 + // #merge-sorted } "works in number example for merge" in { - //#merge + // #merge import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -201,7 +201,7 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { sourceA.merge(sourceB).runWith(Sink.foreach(println)) // merging is not deterministic, can for example print 1, 2, 3, 4, 10, 20, 30, 40 - //#merge + // #merge } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala index 22d880520a..bd32f2c689 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala @@ -133,7 +133,7 @@ class FlowOrElseSpec extends AkkaSpec { } "work in the example" in { - //#or-else + // #or-else val source1 = Source(List("First source")) val source2 = Source(List("Second source")) val emptySource = Source.empty[String] @@ -143,7 +143,7 @@ class FlowOrElseSpec extends AkkaSpec { emptySource.orElse(source2).runWith(Sink.foreach(println)) // this will print "Second source" - //#or-else + // #or-else } trait OrElseProbedFlow { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala index 0f9a74b058..bf3ef7250a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala @@ -17,23 +17,23 @@ class FlowPrependSpec extends AkkaSpec { "An Prepend flow" should { "work in entrance example" in { - //#prepend + // #prepend val ladies = Source(List("Emma", "Emily")) val gentlemen = Source(List("Liam", "William")) gentlemen.prepend(ladies).runWith(Sink.foreach(println)) // this will print "Emma", "Emily", "Liam", "William" - //#prepend + // #prepend } "work in lazy entrance example" in { - //#prependLazy + // #prependLazy val ladies = Source(List("Emma", "Emily")) val gentlemen = Source(List("Liam", "William")) gentlemen.prependLazy(ladies).runWith(Sink.foreach(println)) // this will print "Emma", "Emily", "Liam", "William" - //#prependLazy + // #prependLazy } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala index b360daa0cd..adc558f2f0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala @@ -145,10 +145,11 @@ 
class FlowRecoverWithSpec extends StreamSpec { .map { a => if (a == 3) throw new IndexOutOfBoundsException() else a } - .recoverWithRetries(3, { - case t: Throwable => - Source(List(11, 22, 33)).map(m => if (m == 33) throw ex else m) - }) + .recoverWithRetries(3, + { + case t: Throwable => + Source(List(11, 22, 33)).map(m => if (m == 33) throw ex else m) + }) .runWith(TestSink.probe[Int]) .request(100) .expectNextN(List(1, 2)) @@ -208,9 +209,10 @@ class FlowRecoverWithSpec extends StreamSpec { val result = Source .failed(TE("trigger")) - .recoverWithRetries(1, { - case _: TE => Source.fromGraph(FailingInnerMat) - }) + .recoverWithRetries(1, + { + case _: TE => Source.fromGraph(FailingInnerMat) + }) .runWith(Sink.ignore) result.failed.futureValue should ===(matFail) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala index 5e9cdf5f86..f5b9f2821f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala @@ -57,7 +57,7 @@ class FlowSectionSpec extends StreamSpec(FlowSectionSpec.config) { } "include name in toString" in { - pending //FIXME: Flow has no simple toString anymore + pending // FIXME: Flow has no simple toString anymore val n = "Uppercase reverser" val f1 = Flow[String].map(_.toLowerCase) val f2 = Flow[String].map(_.toUpperCase).map(_.reverse).named(n).map(_.toLowerCase) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala index c8ec9ea78f..40064538d1 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala @@ -523,7 +523,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re /** * Count elements that passing by this flow - * */ + */ private class CounterFlow[T] extends GraphStageWithMaterializedValue[FlowShape[T, T], AtomicLong] { private val in = Inlet[T]("ElementCounterFlow.in") private val out = Outlet[T]("ElementCounterFlow.out") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala index d8e7e5d4ab..f2cf388b52 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala @@ -267,10 +267,10 @@ class FlowSplitWhenSpec extends StreamSpec(""" import system.dispatcher val stream = Source(1 to 5) - // Need to drop to internal API to get a plain Source[Source[Int]] instead of a SubFlow. - // `lift` doesn't cut here because it will prevent the behavior we'd like to see. - // In fact, this test is somewhat useless, as a user cannot trigger double materialization using - // the public splitWhen => SubFlow API. + // Need to drop to internal API to get a plain Source[Source[Int]] instead of a SubFlow. + // `lift` doesn't cut here because it will prevent the behavior we'd like to see. + // In fact, this test is somewhat useless, as a user cannot trigger double materialization using + // the public splitWhen => SubFlow API. 
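
// A condensed sketch of the public `splitWhen => SubFlow` API that the comment
// above refers to; the names and the ActorSystem setup are illustrative, not
// part of this patch.
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source

implicit val system: ActorSystem = ActorSystem("split-demo")

Source(1 to 10)
  .splitWhen(_ % 3 == 0) // a new substream starts at every multiple of 3
  .reduce(_ + _)         // each substream is folded independently
  .mergeSubstreams       // flatten back into a single Source[Int, NotUsed]
  .runForeach(println)   // prints the per-substream sums: 3, 12, 21, 19
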
.via(Split.when(_ => true, SubstreamCancelStrategy.drain)) .map { source => // run twice, but make sure we return the result of the materialization that ran second diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala index c1ae274055..b247ee4dd7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala @@ -29,8 +29,8 @@ class FlowStatefulMapSpec extends StreamSpec { "work in the happy case" in { val sinkProb = Source(List(1, 2, 3, 4, 5)) .statefulMap(() => 0)((agg, elem) => { - (agg + elem, (agg, elem)) - }, _ => None) + (agg + elem, (agg, elem)) + }, _ => None) .runWith(TestSink.probe[(Int, Int)]) sinkProb.expectSubscription().request(6) sinkProb @@ -46,7 +46,7 @@ class FlowStatefulMapSpec extends StreamSpec { val sinkProb = Source(1 to 10) .statefulMap(() => List.empty[Int])( (state, elem) => { - //grouped 3 elements into a list + // grouped 3 elements into a list val newState = elem :: state if (newState.size == 3) (Nil, newState.reverse) @@ -63,11 +63,11 @@ class FlowStatefulMapSpec extends StreamSpec { "be able to resume" in { val testSink = Source(List(1, 2, 3, 4, 5)) .statefulMap(() => 0)((agg, elem) => { - if (elem % 2 == 0) - throw ex - else - (agg + elem, (agg, elem)) - }, _ => None) + if (elem % 2 == 0) + throw ex + else + (agg + elem, (agg, elem)) + }, _ => None) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(TestSink.probe[(Int, Int)]) @@ -78,11 +78,11 @@ class FlowStatefulMapSpec extends StreamSpec { "be able to restart" in { val testSink = Source(List(1, 2, 3, 4, 5)) .statefulMap(() => 0)((agg, elem) => { - if (elem % 3 == 0) - throw ex - else - (agg + elem, (agg, elem)) - }, _ => None) + if (elem % 3 == 0) + throw ex + else + (agg + elem, (agg, elem)) + }, _ => None) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(TestSink.probe[(Int, Int)]) @@ -93,11 +93,11 @@ class FlowStatefulMapSpec extends StreamSpec { "be able to stop" in { val testSink = Source(List(1, 2, 3, 4, 5)) .statefulMap(() => 0)((agg, elem) => { - if (elem % 3 == 0) - throw ex - else - (agg + elem, (agg, elem)) - }, _ => None) + if (elem % 3 == 0) + throw ex + else + (agg + elem, (agg, elem)) + }, _ => None) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.stoppingDecider)) .runWith(TestSink.probe[(Int, Int)]) @@ -109,8 +109,8 @@ class FlowStatefulMapSpec extends StreamSpec { val (testSource, testSink) = TestSource .probe[Int] .statefulMap(() => 0)((agg, elem) => { - (agg + elem, (agg, elem)) - }, _ => None) + (agg + elem, (agg, elem)) + }, _ => None) .toMat(TestSink.probe[(Int, Int)])(Keep.both) .run() @@ -153,11 +153,12 @@ class FlowStatefulMapSpec extends StreamSpec { val testSource = TestSource .probe[Int] .statefulMap(() => 100)((agg, elem) => { - (agg + elem, (agg, elem)) - }, (state: Int) => { - promise.complete(Success(Done)) - Some((state, -1)) - }) + (agg + elem, (agg, elem)) + }, + (state: Int) => { + promise.complete(Success(Done)) + Some((state, -1)) + }) .toMat(Sink.cancelled)(Keep.left) .run() testSource.expectSubscription().expectCancellation() @@ -170,11 +171,12 @@ class FlowStatefulMapSpec extends StreamSpec { val testSource = TestSource .probe[Int] .statefulMap(() => 100)((agg, elem) => { - (agg + elem, (agg, elem)) - }, (state: Int) => { - 
promise.complete(Success(Done)) - Some((state, -1)) - }) + (agg + elem, (agg, elem)) + }, + (state: Int) => { + promise.complete(Success(Done)) + Some((state, -1)) + }) .toMat(Sink.fromSubscriber(testProb))(Keep.left) .run() testProb.cancel(ex) @@ -189,10 +191,11 @@ class FlowStatefulMapSpec extends StreamSpec { val matVal = Source .single(1) - .statefulMap(() => -1)((_, elem) => (elem, elem), _ => { - promise.complete(Success(Done)) - None - }) + .statefulMap(() => -1)((_, elem) => (elem, elem), + _ => { + promise.complete(Success(Done)) + None + }) .runWith(Sink.never)(mat) mat.shutdown() matVal.failed.futureValue shouldBe a[AbruptStageTerminationException] @@ -204,12 +207,13 @@ class FlowStatefulMapSpec extends StreamSpec { Source .single(1) .statefulMap(() => -1)((_, elem) => { - throw ex - (elem, elem) - }, _ => { - promise.complete(Success(Done)) - None - }) + throw ex + (elem, elem) + }, + _ => { + promise.complete(Success(Done)) + None + }) .runWith(Sink.ignore) Await.result(promise.future, 3.seconds) shouldBe Done } @@ -256,7 +260,7 @@ class FlowStatefulMapSpec extends StreamSpec { case _ => (Some(elem), Some(elem)) }, _ => None) - .collect({ case Some(elem) => elem }) + .collect { case Some(elem) => elem } .runWith(TestSink.probe[String]) .request(4) .expectNext("A") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala index 60c032a9be..cf57276387 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala @@ -31,7 +31,7 @@ class FlowThrottleSpec extends StreamSpec(""" "Throttle for single cost elements" must { "work for the happy case" in { - //Source(1 to 5).throttle(1, 100.millis, 0, Shaping) + // Source(1 to 5).throttle(1, 100.millis, 0, Shaping) Source(1 to 5) .throttle(19, 1000.millis, -1, Shaping) .runWith(TestSink.probe[Int]) @@ -162,7 +162,7 @@ class FlowThrottleSpec extends StreamSpec(""" upstream.sendNext(6) downstream.expectNoMessage(100.millis) downstream.expectNext(6) - downstream.expectNoMessage(500.millis) //wait to receive 2 in burst afterwards + downstream.expectNoMessage(500.millis) // wait to receive 2 in burst afterwards downstream.request(5) for (i <- 7 to 10) upstream.sendNext(i) downstream.receiveWithin(100.millis, 2) should be(Seq(7, 8)) @@ -191,7 +191,7 @@ class FlowThrottleSpec extends StreamSpec(""" "Throttle for various cost elements" must { "work for happy case" in { Source(1 to 5) - .throttle(1, 100.millis, 0, (_) => 1, Shaping) + .throttle(1, 100.millis, 0, _ => 1, Shaping) .runWith(TestSink.probe[Int]) .request(5) .expectNext(1, 2, 3, 4, 5) @@ -264,7 +264,7 @@ class FlowThrottleSpec extends StreamSpec(""" val downstream = TestSubscriber.probe[Int]() Source .fromPublisher(upstream) - .throttle(2, 400.millis, 5, (_) => 1, Shaping) + .throttle(2, 400.millis, 5, _ => 1, Shaping) .runWith(Sink.fromSubscriber(downstream)) // Exhaust bucket first @@ -288,7 +288,7 @@ class FlowThrottleSpec extends StreamSpec(""" val downstream = TestSubscriber.probe[Int]() Source .fromPublisher(upstream) - .throttle(2, 400.millis, 5, (e) => if (e < 9) 1 else 20, Shaping) + .throttle(2, 400.millis, 5, e => if (e < 9) 1 else 20, Shaping) .runWith(Sink.fromSubscriber(downstream)) // Exhaust bucket first @@ -300,7 +300,7 @@ class FlowThrottleSpec extends StreamSpec(""" upstream.sendNext(6) downstream.expectNoMessage(100.millis) 
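
// A minimal sketch of the token-bucket `throttle` operator exercised by the
// hunks above, using the same (elements, per, maximumBurst, costCalculation,
// mode) parameter shape; the ActorSystem setup is illustrative.
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.Source

implicit val system: ActorSystem = ActorSystem("throttle-demo")

Source(1 to 10)
  .throttle(2, 1.second, 2, _ => 1, Shaping) // at most 2 elements per second
  .runForeach(println)                       // shaping backpressures rather than failing
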
downstream.expectNext(6) - downstream.expectNoMessage(500.millis) //wait to receive 2 in burst afterwards + downstream.expectNoMessage(500.millis) // wait to receive 2 in burst afterwards downstream.request(5) for (i <- 7 to 9) upstream.sendNext(i) downstream.receiveWithin(200.millis, 2) should be(Seq(7, 8)) @@ -308,7 +308,8 @@ class FlowThrottleSpec extends StreamSpec(""" } "throw exception when exceeding throughput in enforced mode" in { - Await.result(Source(1 to 4).throttle(2, 200.millis, 10, identity, Enforcing).runWith(Sink.seq), 2.seconds) should ===( + Await.result(Source(1 to 4).throttle(2, 200.millis, 10, identity, Enforcing).runWith(Sink.seq), + 2.seconds) should ===( 1 to 4) // Burst is 10 so this will not fail an[RateExceededException] shouldBe thrownBy { @@ -329,7 +330,7 @@ class FlowThrottleSpec extends StreamSpec(""" "handle rate calculation function exception" in { val ex = new RuntimeException with NoStackTrace Source(1 to 5) - .throttle(2, 200.millis, 0, (_) => { throw ex }, Shaping) + .throttle(2, 200.millis, 0, _ => { throw ex }, Shaping) .throttle(1, 100.millis, 5, Enforcing) .runWith(TestSink.probe[Int]) .request(5) @@ -361,10 +362,10 @@ class FlowThrottleSpec extends StreamSpec(""" counter1.set(0) if (rate < expectedMinRate.get) throw new RuntimeException(s"Too low rate, got $rate, expected min ${expectedMinRate.get}, " + - s"after ${(now - startTime).nanos.toMillis} ms at element $elem") + s"after ${(now - startTime).nanos.toMillis} ms at element $elem") if (rate > expectedMaxRate.get) throw new RuntimeException(s"Too high rate, got $rate, expected max ${expectedMaxRate.get}, " + - s"after ${(now - startTime).nanos.toMillis} ms at element $elem") + s"after ${(now - startTime).nanos.toMillis} ms at element $elem") } })(Keep.both) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala index 676add4946..9d3316e72e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala @@ -36,7 +36,7 @@ class FlowWatchTerminationSpec extends StreamSpec { val (p, future) = TestSource.probe[Int].watchTermination()(Keep.both).to(Sink.ignore).run() p.sendNext(1) p.sendError(ex) - whenReady(future.failed) { _ shouldBe (ex) } + whenReady(future.failed) { _ shouldBe ex } } "complete the future for an empty stream" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala index d8e9a20b33..ed0f606e79 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala @@ -23,7 +23,7 @@ class FlowWithContextLogSpec extends StreamSpec(""" "log() from FlowWithContextOps" must { - val supervisorPath = (SystemMaterializer(system).materializer).supervisor.path + val supervisorPath = SystemMaterializer(system).materializer.supervisor.path val LogSrc = s"akka.stream.Log($supervisorPath)" val LogClazz = classOf[Materializer] diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala index 8c08c35969..ad375f19b0 100644 --- 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala @@ -26,7 +26,7 @@ class FlowWithContextSpec extends StreamSpec { .asSource .runWith(TestSink.probe[(Message, Long)]) .request(1) - .expectNext(((Message("az", 1L), 1L))) + .expectNext((Message("az", 1L), 1L)) .expectComplete() } @@ -42,7 +42,7 @@ class FlowWithContextSpec extends StreamSpec { .toMat(TestSink.probe[(Message, Long)])(Keep.both) .run() matValue shouldBe (42 -> materializedValue) - probe.request(1).expectNext(((Message("a", 1L), 1L))).expectComplete() + probe.request(1).expectNext((Message("a", 1L), 1L)).expectComplete() } "be able to map error via FlowWithContext.mapError" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala index d39f454f52..050b7941a6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala @@ -79,12 +79,12 @@ class FlowZipSpec extends BaseTwoStreamsSetup { } "work in fruits example" in { - //#zip + // #zip val sourceFruits = Source(List("apple", "orange", "banana")) val sourceFirstLetters = Source(List("A", "O", "B")) sourceFruits.zip(sourceFirstLetters).runWith(Sink.foreach(println)) // this will print ('apple', 'A'), ('orange', 'O'), ('banana', 'B') - //#zip + // #zip } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala index 0613f933ea..7270c76c8a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala @@ -40,10 +40,10 @@ class FlowZipWithIndexSpec extends StreamSpec { } "work in fruit example" in { - //#zip-with-index + // #zip-with-index Source(List("apple", "orange", "banana")).zipWithIndex.runWith(Sink.foreach(println)) // this will print ('apple', 0), ('orange', 1), ('banana', 2) - //#zip-with-index + // #zip-with-index } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala index 8516d264a6..2a1bbe473d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala @@ -101,7 +101,7 @@ class FlowZipWithSpec extends BaseTwoStreamsSetup { } "work in fruits example" in { - //#zip-with + // #zip-with val sourceCount = Source(List("one", "two", "three")) val sourceFruits = Source(List("apple", "orange", "banana")) @@ -111,7 +111,7 @@ class FlowZipWithSpec extends BaseTwoStreamsSetup { } .runWith(Sink.foreach(println)) // this will print 'one apple', 'two orange', 'three banana' - //#zip-with + // #zip-with } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala index ecaa391870..cac224a26d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala @@ -322,7 +322,7 @@ class FramingSpec extends StreamSpec { "report truncated frames" in { import system.dispatcher val resultFutures: List[Future[(Throwable, 
(ByteOrder, Int, Int, Int))]] = for { - //_ <- 1 to 10 + // _ <- 1 to 10 byteOrder <- byteOrders fieldOffset <- fieldOffsets fieldLength <- fieldLengths diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala index e10d2da574..cfe726dc0b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala @@ -26,7 +26,7 @@ object GraphFlowSpec { val outMerge = b.add(Merge[String](2)) val m2 = b.add(Merge[Int](2)) - inMerge.out.map(_ * 2) ~> m2.in(0) + inMerge.out.map(_ * 2) ~> m2.in(0) m2.out.map(_ / 2).map(i => (i + 1).toString) ~> outMerge.in(0) source2 ~> inMerge.in(0) @@ -192,8 +192,8 @@ class GraphFlowSpec extends StreamSpec { .fromGraph(GraphDSL.createGraph(source, source)(Keep.both) { implicit b => (s1, s2) => import GraphDSL.Implicits._ val merge = b.add(Merge[Int](2)) - s1.out ~> merge.in(0) - merge.out ~> Sink.fromSubscriber(probe) + s1.out ~> merge.in(0) + merge.out ~> Sink.fromSubscriber(probe) s2.out.map(_ * 10) ~> merge.in(1) ClosedShape }) @@ -238,7 +238,7 @@ class GraphFlowSpec extends StreamSpec { val sink = Sink.fromGraph(GraphDSL.createGraph(partialGraph, Flow[String].map(_.toInt))(Keep.both) { implicit b => (partial, flow) => import GraphDSL.Implicits._ - flow.out ~> partial.in + flow.out ~> partial.in partial.out.map(_.toInt) ~> Sink.fromSubscriber(probe) SinkShape(flow.in) }) @@ -297,7 +297,7 @@ class GraphFlowSpec extends StreamSpec { val (m1, _, m3) = RunnableGraph .fromGraph(GraphDSL.createGraph(source, flow, sink)(Tuple3.apply) { implicit b => (src, f, snk) => import GraphDSL.Implicits._ - src.out.map(_.toInt) ~> f.in + src.out.map(_.toInt) ~> f.in f.out.map(_.toString) ~> snk.in ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala index 9d4b0c5748..32bf30e936 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala @@ -27,8 +27,8 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val balance = b.add(Balance[Int](2)) Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -53,8 +53,8 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.createGraph(Sink.asPublisher[Int](false)) { implicit b => p2Sink => val balance = b.add(Balance[Int](2, waitForAllDownstreams = true)) Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(s1) - balance.out(1) ~> p2Sink + balance.out(0) ~> Sink.fromSubscriber(s1) + balance.out(1) ~> p2Sink ClosedShape }) .run() @@ -86,9 +86,9 @@ class GraphBalanceSpec extends StreamSpec(""" implicit b => (p2Sink, p3Sink) => val balance = b.add(Balance[Int](3, waitForAllDownstreams = true)) Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(s1) - balance.out(1) ~> p2Sink - balance.out(2) ~> p3Sink + balance.out(0) ~> Sink.fromSubscriber(s1) + balance.out(1) ~> p2Sink + balance.out(2) ~> p3Sink ClosedShape }) .run() @@ -136,7 +136,7 @@ class GraphBalanceSpec extends StreamSpec(""" 
.fromGraph(GraphDSL.createGraph(sink, sink, sink, sink, sink)(Tuple5.apply) { implicit b => (f1, f2, f3, f4, f5) => val balance = b.add(Balance[Int](5, waitForAllDownstreams = true)) - Source(0 to 14) ~> balance.in + Source(0 to 14) ~> balance.in balance.out(0).grouped(15) ~> f1 balance.out(1).grouped(15) ~> f2 balance.out(2).grouped(15) ~> f3 @@ -157,9 +157,9 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.createGraph(outputs, outputs, outputs)(List(_, _, _)) { implicit b => (o1, o2, o3) => val balance = b.add(Balance[Int](3, waitForAllDownstreams = true)) Source.repeat(1).take(numElementsForSink * 3) ~> balance.in - balance.out(0) ~> o1 - balance.out(1) ~> o2 - balance.out(2) ~> o3 + balance.out(0) ~> o1 + balance.out(1) ~> o2 + balance.out(2) ~> o3 ClosedShape }) .run() @@ -206,8 +206,8 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val balance = b.add(Balance[Int](2)) Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -230,8 +230,8 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val balance = b.add(Balance[Int](2)) Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -255,8 +255,8 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val balance = b.add(Balance[Int](2)) Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -288,8 +288,8 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val balance = b.add(new Balance[Int](2, waitForAllDownstreams = false, eagerCancel = true)) Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -321,8 +321,8 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val balance = b.add(Balance[Int](2)) Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -355,9 +355,9 @@ class GraphBalanceSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val balance = b.add(Balance[Int](3)) Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - balance.out(2) ~> Sink.fromSubscriber(c3) + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(2) ~> Sink.fromSubscriber(c3) ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala index 84a2310b74..77fd1652f4 100644 --- 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala @@ -28,8 +28,8 @@ class GraphBroadcastSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val bcast = b.add(Broadcast[Int](2)) Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) + bcast.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -76,7 +76,7 @@ class GraphBroadcastSpec extends StreamSpec(""" GraphDSL.createGraph(headSink, headSink, headSink, headSink, headSink)((fut1, fut2, fut3, fut4, fut5) => Future.sequence(List(fut1, fut2, fut3, fut4, fut5))) { implicit b => (p1, p2, p3, p4, p5) => val bcast = b.add(Broadcast[Int](5)) - Source(List(1, 2, 3)) ~> bcast.in + Source(List(1, 2, 3)) ~> bcast.in bcast.out(0).grouped(5) ~> p1.in bcast.out(1).grouped(5) ~> p2.in bcast.out(2).grouped(5) ~> p3.in @@ -125,33 +125,33 @@ class GraphBroadcastSpec extends StreamSpec(""" headSink, headSink, headSink)(combine) { - implicit b => - (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, p22) => - val bcast = b.add(Broadcast[Int](22)) - Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0).grouped(5) ~> p1.in - bcast.out(1).grouped(5) ~> p2.in - bcast.out(2).grouped(5) ~> p3.in - bcast.out(3).grouped(5) ~> p4.in - bcast.out(4).grouped(5) ~> p5.in - bcast.out(5).grouped(5) ~> p6.in - bcast.out(6).grouped(5) ~> p7.in - bcast.out(7).grouped(5) ~> p8.in - bcast.out(8).grouped(5) ~> p9.in - bcast.out(9).grouped(5) ~> p10.in - bcast.out(10).grouped(5) ~> p11.in - bcast.out(11).grouped(5) ~> p12.in - bcast.out(12).grouped(5) ~> p13.in - bcast.out(13).grouped(5) ~> p14.in - bcast.out(14).grouped(5) ~> p15.in - bcast.out(15).grouped(5) ~> p16.in - bcast.out(16).grouped(5) ~> p17.in - bcast.out(17).grouped(5) ~> p18.in - bcast.out(18).grouped(5) ~> p19.in - bcast.out(19).grouped(5) ~> p20.in - bcast.out(20).grouped(5) ~> p21.in - bcast.out(21).grouped(5) ~> p22.in - ClosedShape + implicit b => (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, + p22) => + val bcast = b.add(Broadcast[Int](22)) + Source(List(1, 2, 3)) ~> bcast.in + bcast.out(0).grouped(5) ~> p1.in + bcast.out(1).grouped(5) ~> p2.in + bcast.out(2).grouped(5) ~> p3.in + bcast.out(3).grouped(5) ~> p4.in + bcast.out(4).grouped(5) ~> p5.in + bcast.out(5).grouped(5) ~> p6.in + bcast.out(6).grouped(5) ~> p7.in + bcast.out(7).grouped(5) ~> p8.in + bcast.out(8).grouped(5) ~> p9.in + bcast.out(9).grouped(5) ~> p10.in + bcast.out(10).grouped(5) ~> p11.in + bcast.out(11).grouped(5) ~> p12.in + bcast.out(12).grouped(5) ~> p13.in + bcast.out(13).grouped(5) ~> p14.in + bcast.out(14).grouped(5) ~> p15.in + bcast.out(15).grouped(5) ~> p16.in + bcast.out(16).grouped(5) ~> p17.in + bcast.out(17).grouped(5) ~> p18.in + bcast.out(18).grouped(5) ~> p19.in + bcast.out(19).grouped(5) ~> p20.in + bcast.out(20).grouped(5) ~> p21.in + bcast.out(21).grouped(5) ~> p22.in + ClosedShape }) .run() @@ -166,8 +166,8 @@ class GraphBroadcastSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val bcast = b.add(Broadcast[Int](2)) Source(List(1, 2, 3)) ~> bcast.in - 
bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) + bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -190,8 +190,8 @@ class GraphBroadcastSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val bcast = b.add(Broadcast[Int](2)) Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0) ~> Flow[Int].named("identity-a") ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int].named("identity-b") ~> Sink.fromSubscriber(c2) + bcast.out(0) ~> Flow[Int].named("identity-a") ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int].named("identity-b") ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -215,8 +215,8 @@ class GraphBroadcastSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val bcast = b.add(Broadcast[Int](2)) Source.fromPublisher(p1.getPublisher) ~> bcast.in - bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) + bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) ClosedShape }) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala index 778884c1b8..dcaf6a18b9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala @@ -35,9 +35,9 @@ class GraphConcatSpec extends TwoStreamsSetup { val concat2 = b.add(Concat[Int]()) Source(List.empty[Int]) ~> concat1.in(0) - Source(1 to 4) ~> concat1.in(1) + Source(1 to 4) ~> concat1.in(1) - concat1.out ~> concat2.in(0) + concat1.out ~> concat2.in(0) Source(5 to 10) ~> concat2.in(1) concat2.out ~> Sink.fromSubscriber(probe) @@ -136,9 +136,9 @@ class GraphConcatSpec extends TwoStreamsSetup { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val concat = b.add(Concat[Int]()) - Source(List(1, 2, 3)) ~> concat.in(0) + Source(List(1, 2, 3)) ~> concat.in(0) Source.future(promise.future) ~> concat.in(1) - concat.out ~> Sink.fromSubscriber(subscriber) + concat.out ~> Sink.fromSubscriber(subscriber) ClosedShape }) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala index a28575ebd3..d186afacee 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala @@ -54,8 +54,8 @@ class GraphDSLCompileSpec extends StreamSpec { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val merge = b.add(Merge[String](2)) - in1 ~> f1 ~> merge.in(0) - in2 ~> f2 ~> merge.in(1) + in1 ~> f1 ~> merge.in(0) + in2 ~> f2 ~> merge.in(1) merge.out ~> f3 ~> out1 ClosedShape }) @@ -66,7 +66,7 @@ class GraphDSLCompileSpec extends StreamSpec { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val bcast = b.add(Broadcast[String](2)) - in1 ~> f1 ~> bcast.in + in1 ~> f1 ~> bcast.in bcast.out(0) ~> f2 ~> out1 bcast.out(1) ~> f3 ~> out2 ClosedShape @@ -77,7 +77,7 @@ class GraphDSLCompileSpec extends StreamSpec { "build simple balance" in { RunnableGraph.fromGraph(GraphDSL.create() { implicit b => val balance = b.add(Balance[String](2)) - in1 ~> f1 ~> balance.in + in1 ~> f1 ~> balance.in balance.out(0) ~> f2 ~> out1 
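
// A self-contained sketch of the `~>` wiring style these GraphDSL hunks
// realign; the names and the two Sink.foreach stages are illustrative.
import akka.actor.ActorSystem
import akka.stream.ClosedShape
import akka.stream.scaladsl.{ Balance, GraphDSL, RunnableGraph, Sink, Source }

implicit val system: ActorSystem = ActorSystem("balance-demo")

val twoWayBalance = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
  import GraphDSL.Implicits._
  val balance = b.add(Balance[Int](2)) // routes each element to any outlet with demand
  Source(1 to 6) ~> balance.in
  balance.out(0) ~> Sink.foreach[Int](n => println(s"worker-1: $n"))
  balance.out(1) ~> Sink.foreach[Int](n => println(s"worker-2: $n"))
  ClosedShape
})

twoWayBalance.run() // materializes the closed graph
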
balance.out(1) ~> f3 ~> out2 ClosedShape @@ -89,9 +89,9 @@ class GraphDSLCompileSpec extends StreamSpec { .fromGraph(GraphDSL.create() { implicit b => val merge = b.add(Merge[String](2)) val bcast = b.add(Broadcast[String](2)) - in1 ~> f1 ~> merge.in(0) - in2 ~> f2 ~> merge.in(1) - merge ~> f3 ~> bcast + in1 ~> f1 ~> merge.in(0) + in2 ~> f2 ~> merge.in(1) + merge ~> f3 ~> bcast bcast.out(0) ~> f4 ~> out1 bcast.out(1) ~> f5 ~> out2 ClosedShape @@ -105,10 +105,10 @@ class GraphDSLCompileSpec extends StreamSpec { import GraphDSL.Implicits._ val merge = b.add(Merge[String](2)) val bcast = b.add(Broadcast[String](2)) - b.add(in1) ~> f1 ~> merge.in(0) - merge.out ~> f2 ~> bcast.in + b.add(in1) ~> f1 ~> merge.in(0) + merge.out ~> f2 ~> bcast.in bcast.out(0) ~> f3 ~> b.add(out1) - b.add(in2) ~> f4 ~> merge.in(1) + b.add(in2) ~> f4 ~> merge.in(1) bcast.out(1) ~> f5 ~> b.add(out2) ClosedShape }) @@ -132,12 +132,12 @@ class GraphDSLCompileSpec extends StreamSpec { val bcast1 = b.add(Broadcast[String](2)) val bcast2 = b.add(Broadcast[String](2)) val feedbackLoopBuffer = Flow[String].buffer(10, OverflowStrategy.dropBuffer) - in1 ~> f1 ~> merge.in(0) - merge ~> f2 ~> bcast1 - bcast1.out(0) ~> f3 ~> out1 + in1 ~> f1 ~> merge.in(0) + merge ~> f2 ~> bcast1 + bcast1.out(0) ~> f3 ~> out1 bcast1.out(1) ~> feedbackLoopBuffer ~> bcast2 - bcast2.out(0) ~> f5 ~> merge.in(1) // cycle - bcast2.out(1) ~> f6 ~> out2 + bcast2.out(0) ~> f5 ~> merge.in(1) // cycle + bcast2.out(1) ~> f6 ~> out2 ClosedShape }) }.getMessage.toLowerCase should include("cycle") @@ -152,9 +152,9 @@ class GraphDSLCompileSpec extends StreamSpec { val bcast2 = b.add(Broadcast[String](2)) val feedbackLoopBuffer = Flow[String].buffer(10, OverflowStrategy.dropBuffer) import GraphDSL.Implicits._ - b.add(in1) ~> f1 ~> merge ~> f2 ~> bcast1 ~> f3 ~> b.add(out1) - bcast1 ~> feedbackLoopBuffer ~> bcast2 ~> f5 ~> merge - bcast2 ~> f6 ~> b.add(out2) + b.add(in1) ~> f1 ~> merge ~> f2 ~> bcast1 ~> f3 ~> b.add(out1) + bcast1 ~> feedbackLoopBuffer ~> bcast2 ~> f5 ~> merge + bcast2 ~> f6 ~> b.add(out2) ClosedShape }) .run() @@ -166,7 +166,7 @@ class GraphDSLCompileSpec extends StreamSpec { val bcast = b.add(Broadcast[String](2)) val merge = b.add(Merge[String](2)) import GraphDSL.Implicits._ - in1 ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out1 + in1 ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out1 bcast ~> f4 ~> merge ClosedShape }) @@ -193,13 +193,13 @@ class GraphDSLCompileSpec extends StreamSpec { def f(s: String) = Flow[String].via(op[String, String]).named(s) import GraphDSL.Implicits._ - in7 ~> f("a") ~> b7 ~> f("b") ~> m11 ~> f("c") ~> b11 ~> f("d") ~> out2 - b11 ~> f("e") ~> m9 ~> f("f") ~> out9 - b7 ~> f("g") ~> m8 ~> f("h") ~> m9 + in7 ~> f("a") ~> b7 ~> f("b") ~> m11 ~> f("c") ~> b11 ~> f("d") ~> out2 + b11 ~> f("e") ~> m9 ~> f("f") ~> out9 + b7 ~> f("g") ~> m8 ~> f("h") ~> m9 b11 ~> f("i") ~> m10 ~> f("j") ~> out10 in5 ~> f("k") ~> m11 - in3 ~> f("l") ~> b3 ~> f("m") ~> m8 - b3 ~> f("n") ~> m10 + in3 ~> f("l") ~> b3 ~> f("m") ~> m8 + b3 ~> f("n") ~> m10 ClosedShape }) .run() @@ -211,8 +211,8 @@ class GraphDSLCompileSpec extends StreamSpec { val merge = b.add(Merge[String](2)) val bcast = b.add(Broadcast[String](2)) import GraphDSL.Implicits._ - in1 ~> merge ~> bcast ~> out1 - in2 ~> merge + in1 ~> merge ~> bcast ~> out1 + in2 ~> merge bcast ~> out2 ClosedShape }) @@ -227,9 +227,9 @@ class GraphDSLCompileSpec extends StreamSpec { val out = Sink.asPublisher[(Int, String)](false) import GraphDSL.Implicits._ Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> 
unzip.in - unzip.out0 ~> Flow[Int].map(_ * 2) ~> zip.in0 - unzip.out1 ~> zip.in1 - zip.out ~> out + unzip.out0 ~> Flow[Int].map(_ * 2) ~> zip.in0 + unzip.out1 ~> zip.in1 + zip.out ~> out ClosedShape }) .run() @@ -273,9 +273,9 @@ class GraphDSLCompileSpec extends StreamSpec { RunnableGraph.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val merge = b.add(Merge[Fruit](2)) - Source.fromIterator[Fruit](apples) ~> Flow[Fruit] ~> merge.in(0) - Source.fromIterator[Apple](apples) ~> Flow[Apple] ~> merge.in(1) - merge.out ~> Flow[Fruit].map(identity) ~> Sink.fromSubscriber(TestSubscriber.manualProbe[Fruit]()) + Source.fromIterator[Fruit](apples) ~> Flow[Fruit] ~> merge.in(0) + Source.fromIterator[Apple](apples) ~> Flow[Apple] ~> merge.in(1) + merge.out ~> Flow[Fruit].map(identity) ~> Sink.fromSubscriber(TestSubscriber.manualProbe[Fruit]()) ClosedShape }) } @@ -286,31 +286,31 @@ class GraphDSLCompileSpec extends StreamSpec { val fruitMerge = b.add(Merge[Fruit](2)) Source.fromIterator[Fruit](apples) ~> fruitMerge Source.fromIterator[Apple](apples) ~> fruitMerge - fruitMerge ~> Sink.head[Fruit] + fruitMerge ~> Sink.head[Fruit] "fruitMerge ~> Sink.head[Apple]" shouldNot compile val appleMerge = b.add(Merge[Apple](2)) "Source[Fruit](apples) ~> appleMerge" shouldNot compile - Source.empty[Apple] ~> appleMerge + Source.empty[Apple] ~> appleMerge Source.fromIterator[Apple](apples) ~> appleMerge - appleMerge ~> Sink.head[Fruit] + appleMerge ~> Sink.head[Fruit] val appleMerge2 = b.add(Merge[Apple](2)) - Source.empty[Apple] ~> appleMerge2 + Source.empty[Apple] ~> appleMerge2 Source.fromIterator[Apple](apples) ~> appleMerge2 - appleMerge2 ~> Sink.head[Apple] + appleMerge2 ~> Sink.head[Apple] val fruitBcast = b.add(Broadcast[Fruit](2)) Source.fromIterator[Apple](apples) ~> fruitBcast - fruitBcast ~> Sink.head[Fruit] - fruitBcast ~> Sink.ignore + fruitBcast ~> Sink.head[Fruit] + fruitBcast ~> Sink.ignore "fruitBcast ~> Sink.head[Apple]" shouldNot compile val appleBcast = b.add(Broadcast[Apple](2)) "Source[Fruit](apples) ~> appleBcast" shouldNot compile Source.fromIterator[Apple](apples) ~> appleBcast - appleBcast ~> Sink.head[Fruit] - appleBcast ~> Sink.head[Apple] + appleBcast ~> Sink.head[Fruit] + appleBcast ~> Sink.head[Apple] ClosedShape }) } @@ -326,24 +326,24 @@ class GraphDSLCompileSpec extends StreamSpec { val whatever = b.add(Sink.asPublisher[Any](false)) import GraphDSL.Implicits._ b.add(Source.fromIterator[Fruit](apples)) ~> merge.in(0) - appleSource ~> merge.in(1) - appleSource ~> merge.in(2) - fruitSource ~> merge.in(3) - fruitSource ~> Flow[Fruit].map(identity) ~> merge.in(4) - appleSource ~> Flow[Apple].map(identity) ~> merge.in(5) - b.add(Source.fromIterator(apples)) ~> merge.in(6) - b.add(Source.fromIterator(apples)) ~> Flow[Fruit].map(identity) ~> merge.in(7) - b.add(Source.fromIterator(apples)) ~> Flow[Apple].map(identity) ~> merge.in(8) - merge.out ~> Flow[Fruit].map(identity) ~> outA + appleSource ~> merge.in(1) + appleSource ~> merge.in(2) + fruitSource ~> merge.in(3) + fruitSource ~> Flow[Fruit].map(identity) ~> merge.in(4) + appleSource ~> Flow[Apple].map(identity) ~> merge.in(5) + b.add(Source.fromIterator(apples)) ~> merge.in(6) + b.add(Source.fromIterator(apples)) ~> Flow[Fruit].map(identity) ~> merge.in(7) + b.add(Source.fromIterator(apples)) ~> Flow[Apple].map(identity) ~> merge.in(8) + merge.out ~> Flow[Fruit].map(identity) ~> outA b.add(Source.fromIterator(apples)) ~> Flow[Apple] ~> merge.in(9) b.add(Source.fromIterator(apples)) ~> Flow[Apple] ~> outB 
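
// The Fruit/Apple wiring above is a compile-time variance check. A condensed,
// self-contained version of the rule it asserts; these Fruit/Apple types are
// illustrative stand-ins for the spec's own.
import scala.concurrent.Future
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }

implicit val system: ActorSystem = ActorSystem("variance-demo")

trait Fruit
final class Apple extends Fruit

val apples: Source[Apple, NotUsed] = Source(List(new Apple, new Apple))
val fruits: Source[Fruit, NotUsed] = apples // Source is covariant in its elements
// so an Apple stream may feed a Fruit junction, but not the other way around
val first: Future[Fruit] = fruits.merge(apples).runWith(Sink.head)
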
b.add(Source.fromIterator(apples)) ~> Flow[Apple] ~> b.add(Sink.asPublisher[Fruit](false)) - appleSource ~> Flow[Apple] ~> merge.in(10) + appleSource ~> Flow[Apple] ~> merge.in(10) Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out1 ~> whatever - unzip.out0 ~> b.add(Sink.asPublisher[Any](false)) + unzip.out1 ~> whatever + unzip.out0 ~> b.add(Sink.asPublisher[Any](false)) "merge.out ~> b.add(Broadcast[Apple](2))" shouldNot compile "merge.out ~> Flow[Fruit].map(identity) ~> b.add(Broadcast[Apple](2))" shouldNot compile diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala index 771850fd2d..40f014db0e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala @@ -28,7 +28,7 @@ class GraphMatValueSpec extends StreamSpec { val sub = TestSubscriber.manualProbe[Int]() val f = RunnableGraph .fromGraph(GraphDSL.createGraph(foldSink) { implicit b => fold => - Source(1 to 10) ~> fold + Source(1 to 10) ~> fold b.materializedValue.mapAsync(4)(identity) ~> Sink.fromSubscriber(sub) ClosedShape }) @@ -47,7 +47,7 @@ class GraphMatValueSpec extends StreamSpec { val f = RunnableGraph .fromGraph(GraphDSL.createGraph(foldSink) { implicit b => fold => val zip = b.add(ZipWith[Int, Int, Int](_ + _)) - Source(1 to 10) ~> fold + Source(1 to 10) ~> fold b.materializedValue.mapAsync(4)(identity) ~> zip.in0 b.materializedValue.mapAsync(4)(identity) ~> zip.in1 @@ -78,7 +78,7 @@ class GraphMatValueSpec extends StreamSpec { "allow exposing the materialized value as port even if wrapped and the final materialized value is Unit" in { val noMatSource: Source[Int, Unit] = - foldFeedbackSource.mapAsync(4)(identity).map(_ + 100).mapMaterializedValue((_) => ()) + foldFeedbackSource.mapAsync(4)(identity).map(_ + 100).mapMaterializedValue(_ => ()) Await.result(noMatSource.runWith(Sink.head), 3.seconds) should ===(155) } @@ -87,7 +87,7 @@ class GraphMatValueSpec extends StreamSpec { implicit b => (s1, s2) => val zip = b.add(ZipWith[Int, Int, Int](_ + _)) - s1.out.mapAsync(4)(identity) ~> zip.in0 + s1.out.mapAsync(4)(identity) ~> zip.in0 s2.out.mapAsync(4)(identity).map(_ * 100) ~> zip.in1 SourceShape(zip.out) }) @@ -95,7 +95,7 @@ class GraphMatValueSpec extends StreamSpec { val compositeSource2 = Source.fromGraph(GraphDSL.createGraph(compositeSource1, compositeSource1)(Keep.both) { implicit b => (s1, s2) => val zip = b.add(ZipWith[Int, Int, Int](_ + _)) - s1.out ~> zip.in0 + s1.out ~> zip.in0 s2.out.map(_ * 10000) ~> zip.in1 SourceShape(zip.out) }) @@ -137,7 +137,7 @@ class GraphMatValueSpec extends StreamSpec { Source.empty.mapMaterializedValue(_ => done = true) ~> Sink.ignore ClosedShape } - val r = RunnableGraph.fromGraph(GraphDSL.createGraph(Sink.ignore) { implicit b => (s) => + val r = RunnableGraph.fromGraph(GraphDSL.createGraph(Sink.ignore) { implicit b => s => b.add(g) Source(1 to 10) ~> s ClosedShape @@ -226,7 +226,7 @@ class GraphMatValueSpec extends StreamSpec { val nest3 = Flow[String].via(nest2) val nest4 = Flow[String].via(nest3) - //fails + // fails val matValue = Source(List("")).via(nest4).to(Sink.ignore).run() matValue should ===(NotUsed) @@ -248,20 +248,20 @@ class GraphMatValueSpec extends StreamSpec { "build more complicated graph with flows optimized for identity flows" in { val flow1 = Flow.fromSinkAndSourceMat(Sink.ignore, 
Source.single(1).viaMat(Flow[Int])(Keep.both))(Keep.both) val (mA, (m1, m2)) = Source.single(8).viaMat(flow1)(Keep.right).to(Sink.ignore).run() - Await.result(mA, 1.second) should ===(Done) //from Sink.ignore - m1 should ===(NotUsed) //from Source.single(1) - m2 should ===(NotUsed) //from Flow[Int] + Await.result(mA, 1.second) should ===(Done) // from Sink.ignore + m1 should ===(NotUsed) // from Source.single(1) + m2 should ===(NotUsed) // from Flow[Int] val flow2 = Flow.fromSinkAndSourceMat(Sink.ignore, Source.maybe[Int].viaMat(Flow[Int])(Keep.left))(Keep.both) val (mB, m3) = Source.single(8).viaMat(flow2)(Keep.right).to(Sink.ignore).run() - Await.result(mB, 1.second) should ===(Done) //from Sink.ignore + Await.result(mB, 1.second) should ===(Done) // from Sink.ignore // Fails with ClassCastException if value is wrong - m3.success(None) //from Source.maybe[Int] + m3.success(None) // from Source.maybe[Int] val flow3 = Flow.fromSinkAndSourceMat(Sink.ignore, Source.single(1).viaMat(Flow[Int])(Keep.right))(Keep.both) val (mC, m4) = Source.single(8).viaMat(flow3)(Keep.right).to(Sink.ignore).run() - Await.result(mC, 1.second) should ===(Done) //from Sink.ignore - m4 should ===(NotUsed) //from Flow[Int] + Await.result(mC, 1.second) should ===(Done) // from Sink.ignore + m4 should ===(NotUsed) // from Flow[Int] } "provide a new materialized value for each materialization" in { @@ -273,7 +273,7 @@ class GraphMatValueSpec extends StreamSpec { import GraphDSL.Implicits._ val merge = b.add(Merge[Int](2)) - s ~> merge + s ~> merge b.materializedValue ~> merge SourceShape(merge.out) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala index b1b45584d9..427f5d76e7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala @@ -38,9 +38,9 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.createGraph(up1, up2, up3)((_, _, _)) { implicit b => (s1, s2, s3) => val m = b.add(MergeLatest[Int](3)) - s1 ~> m - s2 ~> m - s3 ~> m + s1 ~> m + s2 ~> m + s3 ~> m m.out ~> Sink.fromSubscriber(probe) ClosedShape }) @@ -72,9 +72,9 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.createGraph(up1, up2, up3)((_, _, _)) { implicit b => (s1, s2, s3) => val m = b.add(MergeLatest[Int](3)) - s1 ~> m - s2 ~> m - s3 ~> m + s1 ~> m + s2 ~> m + s3 ~> m m.out ~> Sink.fromSubscriber(probe) ClosedShape }) @@ -137,8 +137,8 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.createGraph(up1, up2)((_, _)) { implicit b => (s1, s2) => val m = b.add(MergeLatest[Int](2, true)) - s1 ~> m - s2 ~> m + s1 ~> m + s2 ~> m m.out ~> Sink.fromSubscriber(probe) ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala index 412335a46b..13a90a68ee 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala @@ -41,9 +41,9 @@ class GraphMergePreferredSpec extends TwoStreamsSetup { preferred ~> merge.preferred merge.out.grouped(numElements * 2) ~> sink.in - aux ~> merge.in(0) - aux ~> merge.in(1) - aux ~> merge.in(2) + aux ~> merge.in(0) + aux ~> merge.in(1) + aux ~> merge.in(2) 
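
// A minimal sketch of the MergePreferred semantics this spec exercises, via
// the flow-level shorthand that the FlowMergeSpec hunks above also document;
// the sources and ActorSystem setup are illustrative.
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Sink, Source }

implicit val system: ActorSystem = ActorSystem("merge-preferred-demo")

val regular   = Source(List(1, 2, 3))
val preferred = Source(List(10, 20, 30))

// when both inlets have an element ready, the preferred (right-hand) side wins
regular.mergePreferred(preferred, true).runWith(Sink.foreach(println))
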
ClosedShape }) .run() @@ -58,16 +58,16 @@ class GraphMergePreferredSpec extends TwoStreamsSetup { Source(1 to 100) ~> merge.preferred merge.out.grouped(500) ~> sink.in - Source(101 to 200) ~> merge.in(0) - Source(201 to 300) ~> merge.in(1) - Source(301 to 400) ~> merge.in(2) + Source(101 to 200) ~> merge.in(0) + Source(201 to 300) ~> merge.in(1) + Source(301 to 400) ~> merge.in(2) ClosedShape }) .run() val resultSeq = Await.result(result, 3.seconds) resultSeq.toSet should ===((1 to 400).toSet) - //test ordering of elements coming from each of the flows + // test ordering of elements coming from each of the flows resultSeq.filter(_ <= 100) should ===(1 to 100) resultSeq.filter(e => e > 100 && e <= 200) should ===(101 to 200) resultSeq.filter(e => e > 200 && e <= 300) should ===(201 to 300) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala index 33538f834c..b6996b1e9a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala @@ -150,9 +150,9 @@ class GraphMergePrioritizedSpec extends TwoStreamsSetup { // introduce a delay on the consuming side making it more likely that // the actual prioritization happens and elements does not just pass through val delayFirst = b.add(Flow[T].initialDelay(50.millis)) - s1.out ~> merge.in(0) - s2.out ~> merge.in(1) - s3.out ~> merge.in(2) + s1.out ~> merge.in(0) + s2.out ~> merge.in(1) + s3.out ~> merge.in(2) merge.out ~> delayFirst ~> Sink.fromSubscriber(probe) ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala index 8af63ad63c..f37fa513fb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala @@ -40,8 +40,8 @@ class GraphMergeSpec extends TwoStreamsSetup { val m2 = b.add(Merge[Int](2)) source1 ~> m1.in(0) - m1.out ~> Flow[Int].map(_ * 2) ~> m2.in(0) - m2.out ~> Flow[Int].map(_ / 2).map(_ + 1) ~> Sink.fromSubscriber(probe) + m1.out ~> Flow[Int].map(_ * 2) ~> m2.in(0) + m2.out ~> Flow[Int].map(_ / 2).map(_ + 1) ~> Sink.fromSubscriber(probe) source2 ~> m1.in(1) source3 ~> m2.in(1) @@ -56,7 +56,7 @@ class GraphMergeSpec extends TwoStreamsSetup { subscription.request(1) collected :+= probe.expectNext() } - //test ordering of elements coming from each of nonempty flows + // test ordering of elements coming from each of nonempty flows collected.filter(_ <= 4) should ===(1 to 4) collected.filter(_ >= 5) should ===(5 to 10) @@ -92,12 +92,12 @@ class GraphMergeSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.create() { implicit b => val merge = b.add(Merge[Int](6)) - source1 ~> merge.in(0) - source2 ~> merge.in(1) - source3 ~> merge.in(2) - source4 ~> merge.in(3) - source5 ~> merge.in(4) - source6 ~> merge.in(5) + source1 ~> merge.in(0) + source2 ~> merge.in(1) + source3 ~> merge.in(2) + source4 ~> merge.in(3) + source5 ~> merge.in(4) + source6 ~> merge.in(5) merge.out ~> Sink.fromSubscriber(probe) ClosedShape @@ -179,8 +179,8 @@ class GraphMergeSpec extends TwoStreamsSetup { val (graphSubscriber1, graphSubscriber2) = RunnableGraph .fromGraph(GraphDSL.createGraph(src1, src2)((_, _)) { implicit b => (s1, s2) => val merge = b.add(Merge[Int](2)) - s1.out ~> 
merge.in(0) - s2.out ~> merge.in(1) + s1.out ~> merge.in(0) + s2.out ~> merge.in(1) merge.out ~> Sink.fromSubscriber(down) ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala index cbf7838743..8eb6db52bb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala @@ -48,15 +48,15 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "support broadcast - merge layouts" in { val resultFuture = RunnableGraph - .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => (sink) => + .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => sink => import GraphDSL.Implicits._ val bcast = b.add(Broadcast[Int](2)) val merge = b.add(Merge[Int](2)) - Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0) ~> merge.in(0) + Source(List(1, 2, 3)) ~> bcast.in + bcast.out(0) ~> merge.in(0) bcast.out(1).map(_ + 3) ~> merge.in(1) - merge.out.grouped(10) ~> sink.in + merge.out.grouped(10) ~> sink.in ClosedShape }) .run() @@ -67,7 +67,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "support balance - merge (parallelization) layouts" in { val elements = 0 to 10 val out = RunnableGraph - .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => (sink) => + .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => sink => import GraphDSL.Implicits._ val balance = b.add(Balance[Int](5)) val merge = b.add(Merge[Int](5)) @@ -104,26 +104,26 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" val in7 = Source(List(7)) // First layer - in7 ~> b7.in + in7 ~> b7.in b7.out(0) ~> m11.in(0) b7.out(1) ~> m8.in(0) in5 ~> m11.in(1) - in3 ~> b3.in + in3 ~> b3.in b3.out(0) ~> m8.in(1) b3.out(1) ~> m10.in(0) // Second layer - m11.out ~> b11.in + m11.out ~> b11.in b11.out(0).grouped(1000) ~> sink2.in // Vertex 2 is omitted since it has only one in and out - b11.out(1) ~> m9.in(0) - b11.out(2) ~> m10.in(1) + b11.out(1) ~> m9.in(0) + b11.out(2) ~> m10.in(1) m8.out ~> m9.in(1) // Third layer - m9.out.grouped(1000) ~> sink9.in + m9.out.grouped(1000) ~> sink9.in m10.out.grouped(1000) ~> sink10.in ClosedShape @@ -139,15 +139,15 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "allow adding of flows to sources and sinks to flows" in { val resultFuture = RunnableGraph - .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => (sink) => + .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => sink => import GraphDSL.Implicits._ val bcast = b.add(Broadcast[Int](2)) val merge = b.add(Merge[Int](2)) Source(List(1, 2, 3)).map(_ * 2) ~> bcast.in - bcast.out(0) ~> merge.in(0) - bcast.out(1).map(_ + 3) ~> merge.in(1) - merge.out.grouped(10) ~> sink.in + bcast.out(0) ~> merge.in(0) + bcast.out(1).map(_ + 3) ~> merge.in(1) + merge.out.grouped(10) ~> sink.in ClosedShape }) .run() @@ -183,7 +183,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" import GraphDSL.Implicits._ val merge = b.add(Merge[Int](2)) - Source(List(1, 2, 3)) ~> s1.in1 + Source(List(1, 2, 3)) ~> s1.in1 Source(List(10, 11, 12)) ~> s1.in2 s1.out1 ~> s2.in1 @@ -208,7 +208,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "be possible to use with generated components" in { implicit val ex = system.dispatcher - //#graph-from-list + // #graph-from-list val sinks = immutable .Seq("a", "b", "c") .map(prefix => 
Flow[String].filter(str => str.startsWith(prefix)).toMat(Sink.head[String])(Keep.right)) @@ -225,7 +225,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" }) val matList: Seq[Future[String]] = g.run() - //#graph-from-list + // #graph-from-list val result: Seq[String] = Await.result(Future.sequence(matList), 3.seconds) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala index 0d744c2a76..5ad19bf033 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala @@ -32,8 +32,8 @@ class GraphPartialSpec extends StreamSpec(""" .fromGraph(GraphDSL.createGraph(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b => (d1, d2, sink) => Source(List(1, 2, 3)) ~> d1.in - d1.out ~> d2.in - d2.out.grouped(100) ~> sink.in + d1.out ~> d2.in + d2.out.grouped(100) ~> sink.in ClosedShape }) .run() @@ -46,8 +46,8 @@ class GraphPartialSpec extends StreamSpec(""" val bcast = b.add(Broadcast[Int](3)) val zip = b.add(ZipWith((a: Int, b: Int) => a + b)) - bcast.out(0) ~> zip.in0 - bcast.out(1) ~> zip.in1 + bcast.out(0) ~> zip.in0 + bcast.out(1) ~> zip.in1 bcast.out(2).grouped(100) ~> sink.in FlowShape(bcast.in, zip.out) } @@ -56,8 +56,8 @@ class GraphPartialSpec extends StreamSpec(""" .fromGraph(GraphDSL.createGraph(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b => (d1, d2, sink) => Source(List(1, 2, 3)) ~> d1.in - d1.out ~> d2.in - d2.out.grouped(100) ~> sink.in + d1.out ~> d2.in + d2.out.grouped(100) ~> sink.in ClosedShape }) .run() @@ -79,7 +79,7 @@ class GraphPartialSpec extends StreamSpec(""" bcast.out(1) ~> zip.in1 bcast.out(2) ~> s1.in - zip.out ~> bcast2.in + zip.out ~> bcast2.in bcast2.out(0) ~> s2.in FlowShape(bcast.in, bcast2.out(1)) @@ -89,8 +89,8 @@ class GraphPartialSpec extends StreamSpec(""" .fromGraph(GraphDSL.createGraph(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b => (d1, d2, sink) => Source(List(1, 2, 3)) ~> d1.in - d1.out ~> d2.in - d2.out.grouped(100) ~> sink.in + d1.out ~> d2.in + d2.out.grouped(100) ~> sink.in ClosedShape }) .run() @@ -111,7 +111,7 @@ class GraphPartialSpec extends StreamSpec(""" .fromGraph(GraphDSL.createGraph(Sink.head[Int], p)(Keep.left) { implicit b => (sink, flow) => import GraphDSL.Implicits._ Source.single(0) ~> flow.in - flow.out ~> sink.in + flow.out ~> sink.in ClosedShape }) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala index 82b8bfbf89..c7ac93ec28 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala @@ -26,15 +26,16 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if (g > 3) => 0 - case l if (l < 3) => 1 - case e if (e == 3) => 2 - })) + val partition = b.add(Partition[Int](3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => 2 + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in - partition.out(0) ~> sink1.in - partition.out(1) ~> sink2.in - partition.out(2) ~> sink3.in + partition.out(0) ~> 
sink1.in + partition.out(1) ~> sink2.in + partition.out(2) ~> sink3.in ClosedShape }) .run() @@ -51,13 +52,14 @@ class GraphPartitionSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[String](2, { - case s if (s.length > 4) => 0 - case _ => 1 - })) + val partition = b.add(Partition[String](2, + { + case s if s.length > 4 => 0 + case _ => 1 + })) Source(List("this", "is", "just", "another", "test")) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Sink.fromSubscriber(c2) + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -82,8 +84,8 @@ class GraphPartitionSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val partition = b.add(Partition[Int](2, { case l if l < 6 => 0; case _ => 1 })) Source(List(6, 3)) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Sink.fromSubscriber(c2) + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -106,8 +108,8 @@ class GraphPartitionSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val partition = b.add(new Partition[Int](2, { case l if l < 6 => 0; case _ => 1 }, false)) Source.fromPublisher(p1.getPublisher) ~> partition.in - partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) + partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -139,8 +141,8 @@ class GraphPartitionSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val partition = b.add(new Partition[Int](2, { case l if l < 6 => 0; case _ => 1 }, true)) Source.fromPublisher(p1.getPublisher) ~> partition.in - partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) + partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -164,13 +166,14 @@ class GraphPartitionSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[String](2, { - case s if s == "a" || s == "b" => 0 - case _ => 1 - })) + val partition = b.add(Partition[String](2, + { + case s if s == "a" || s == "b" => 0 + case _ => 1 + })) Source(List("a", "b", "c", "d")) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Sink.fromSubscriber(c2) + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -190,13 +193,14 @@ class GraphPartitionSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[String](2, { - case s if s == "a" || s == "b" => 0 - case _ => 1 - })) + val partition = b.add(Partition[String](2, + { + case s if s == "a" || s == "b" => 0 + case _ => 1 + })) Source(List("a", "b", "c")) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> 
Sink.fromSubscriber(c2) + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -219,10 +223,10 @@ class GraphPartitionSpec extends StreamSpec(""" val g = RunnableGraph.fromGraph(GraphDSL.createGraph(s) { implicit b => sink => val partition = b.add(Partition[Int](2, { case l if l < 4 => 0; case _ => 1 })) val merge = b.add(Merge[Int](2)) - Source(input) ~> partition.in + Source(input) ~> partition.in partition.out(0) ~> merge.in(0) partition.out(1) ~> merge.in(1) - merge.out ~> sink.in + merge.out ~> sink.in ClosedShape }) @@ -241,7 +245,7 @@ class GraphPartitionSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val partition = b.add(Partition[Int](2, { case l if l < 6 => 0; case _ => 1 })) - Source(List(6)) ~> partition.in + Source(List(6)) ~> partition.in partition.out(0) ~> Sink.fromSubscriber(c1) partition.out(1) ~> Sink.fromSubscriber(c2) ClosedShape @@ -281,15 +285,16 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if g > 3 => 0 - case l if l < 3 => 1 - case e if e == 3 => throw TE("Resume") - })) + val partition = b.add(Partition[Int](3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => throw TE("Resume") + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in - partition.out(0) ~> sink1.in - partition.out(1) ~> sink2.in - partition.out(2) ~> sink3.in + partition.out(0) ~> sink1.in + partition.out(1) ~> sink2.in + partition.out(2) ~> sink3.in ClosedShape }) .withAttributes(ActorAttributes.supervisionStrategy(_ => Supervision.Resume)) @@ -304,15 +309,16 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if g > 3 => 0 - case l if l < 3 => 1 - case e if e == 3 => throw TE("Restart") - })) + val partition = b.add(Partition[Int](3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => throw TE("Restart") + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in - partition.out(0) ~> sink1.in - partition.out(1) ~> sink2.in - partition.out(2) ~> sink3.in + partition.out(0) ~> sink1.in + partition.out(1) ~> sink2.in + partition.out(2) ~> sink3.in ClosedShape }) .withAttributes(ActorAttributes.supervisionStrategy(_ => Supervision.Restart)) @@ -328,15 +334,16 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if g > 3 => 0 - case l if l < 3 => 1 - case e if e == 3 => -1 // out of bounds - })) + val partition = b.add(Partition[Int](3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => -1 // out of bounds + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in - partition.out(0) ~> sink1.in - partition.out(1) ~> sink2.in - partition.out(2) ~> sink3.in + partition.out(0) ~> sink1.in + partition.out(1) ~> sink2.in + partition.out(2) ~> sink3.in ClosedShape }) .withAttributes(ActorAttributes.supervisionStrategy(_ => Supervision.Resume)) diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala index fae6263d78..2f45c1d30f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala @@ -45,13 +45,15 @@ class GraphStageTimersSpec extends StreamSpec { override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { val tickCount = Iterator.from(1) - setHandler(in, new InHandler { - override def onPush() = push(out, grab(in)) - }) + setHandler(in, + new InHandler { + override def onPush() = push(out, grab(in)) + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def preStart() = { sideChannel.asyncCallback = getAsyncCallback(onTestEvent) @@ -158,16 +160,18 @@ class GraphStageTimersSpec extends StreamSpec { override def preStart(): Unit = scheduleWithFixedDelay("tick", 100.millis, 100.millis) - setHandler(out, new OutHandler { - override def onPull() = () // Do nothing - override def onDownstreamFinish(cause: Throwable) = completeStage() - }) + setHandler(out, + new OutHandler { + override def onPull() = () // Do nothing + override def onDownstreamFinish(cause: Throwable) = completeStage() + }) - setHandler(in, new InHandler { - override def onPush() = () // Do nothing - override def onUpstreamFinish() = completeStage() - override def onUpstreamFailure(ex: Throwable) = failStage(ex) - }) + setHandler(in, + new InHandler { + override def onPush() = () // Do nothing + override def onUpstreamFinish() = completeStage() + override def onUpstreamFailure(ex: Throwable) = failStage(ex) + }) override def onTimer(timerKey: Any) = { tickCount += 1 @@ -205,13 +209,15 @@ class GraphStageTimersSpec extends StreamSpec { override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { override def preStart(): Unit = scheduleOnce("tick", 100.millis) - setHandler(in, new InHandler { - override def onPush() = () // Ignore - }) + setHandler(in, + new InHandler { + override def onPush() = () // Ignore + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def onTimer(timerKey: Any) = throw exception } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala index 70f59f83bb..f199006209 100755 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala @@ -25,8 +25,8 @@ class GraphUnzipSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val unzip = b.add(Unzip[Int, String]()) Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out1 ~> Flow[String].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) - unzip.out0 ~> Flow[Int].buffer(16, OverflowStrategy.backpressure).map(_ * 2) ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Flow[String].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) + unzip.out0 ~> Flow[Int].buffer(16, OverflowStrategy.backpressure).map(_ * 2) ~> Sink.fromSubscriber(c1) ClosedShape }) .run() @@ -57,8 +57,8 @@ class GraphUnzipSpec extends 
StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val unzip = b.add(Unzip[Int, String]()) Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -81,8 +81,8 @@ class GraphUnzipSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val unzip = b.add(Unzip[Int, String]()) Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -105,8 +105,8 @@ class GraphUnzipSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val unzip = b.add(Unzip[Int, String]()) Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -130,8 +130,8 @@ class GraphUnzipSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val unzip = b.add(Unzip[Int, String]()) Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -156,8 +156,8 @@ class GraphUnzipSpec extends StreamSpec(""" .fromGraph(GraphDSL.create() { implicit b => val unzip = b.add(Unzip[Int, String]()) Source.fromPublisher(p1.getPublisher) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) ClosedShape }) .run() @@ -186,9 +186,9 @@ class GraphUnzipSpec extends StreamSpec(""" val zip = b.add(Zip[Int, String]()) val unzip = b.add(Unzip[Int, String]()) Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> zip.in0 - unzip.out1 ~> zip.in1 - zip.out ~> Sink.fromSubscriber(c1) + unzip.out0 ~> zip.in0 + unzip.out1 ~> zip.in1 + zip.out ~> Sink.fromSubscriber(c1) ClosedShape }) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala index 248e7f39a1..2efd61dd99 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala @@ -58,8 +58,8 @@ class GraphUnzipWithSpec extends StreamSpec(""" val f = fixture(b) Source.fromPublisher(p) ~> f.in - f.left ~> Sink.fromSubscriber(leftSubscriber) - f.right ~> Sink.fromSubscriber(rightSubscriber) + f.left ~> Sink.fromSubscriber(leftSubscriber) + f.right ~> Sink.fromSubscriber(rightSubscriber) ClosedShape }) @@ -109,7 +109,7 @@ class GraphUnzipWithSpec extends StreamSpec(""" val unzip = b.add(UnzipWith(f)) Source(1 to 4) ~> unzip.in - unzip.out0 ~> Flow[LeftOutput].buffer(4, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(leftProbe) + unzip.out0 ~> Flow[LeftOutput].buffer(4, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(leftProbe) unzip.out1 ~> Flow[RightOutput].buffer(4, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(rightProbe) ClosedShape @@ -216,7 +216,7 @@ class GraphUnzipWithSpec extends StreamSpec(""" probe.ref ! 
killSwitch NotUsed } - unzip.out0 ~> killSwitchFlow[Int] ~> Sink.ignore + unzip.out0 ~> killSwitchFlow[Int] ~> Sink.ignore unzip.out1 ~> killSwitchFlow[String] ~> Sink.ignore ClosedShape diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala index 76cfc98cd9..c793d52b14 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala @@ -289,8 +289,8 @@ class GraphZipLatestSpec extends StreamSpec with ScalaCheckPropertyChecks with S implicit b => (ts, as, bs) => import GraphDSL.Implicits._ val zipLatest = b.add(new ZipLatest[A, B]()) - as ~> zipLatest.in0 - bs ~> zipLatest.in1 + as ~> zipLatest.in0 + bs ~> zipLatest.in1 zipLatest.out ~> ts ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala index 3ffb120774..fb244a3cb4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala @@ -48,9 +48,9 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.create() { implicit b => val zipLatest = b.add(ZipLatestWith((_: Int) + (_: Int))) val never = Source.single(3).initialDelay(1 day) - Source(1 to 2).concat(never) ~> zipLatest.in0 + Source(1 to 2).concat(never) ~> zipLatest.in0 Source.fromPublisher(upstreamProbe) ~> zipLatest.in1 - zipLatest.out ~> Sink.fromSubscriber(downstreamProbe) + zipLatest.out ~> Sink.fromSubscriber(downstreamProbe) ClosedShape }) .run() @@ -84,7 +84,7 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup { val zip = b.add(ZipLatestWith[Int, Int, Int]((_: Int) / (_: Int))) val never = Source.single(2).initialDelay(1 day) Source.single(1).concat(never) ~> zip.in0 - Source(-2 to 2) ~> zip.in1 + Source(-2 to 2) ~> zip.in1 zip.out ~> Sink.fromSubscriber(probe) @@ -117,9 +117,9 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val zipLatest = b.add(ZipLatestWith((_: Int) + (_: Int), false)) - Source.fromPublisher(upstreamProbe) ~> zipLatest.in0 + Source.fromPublisher(upstreamProbe) ~> zipLatest.in0 Source.fromPublisher(upstreamProbe2) ~> zipLatest.in1 - zipLatest.out ~> Sink.fromSubscriber(downstreamProbe) + zipLatest.out ~> Sink.fromSubscriber(downstreamProbe) ClosedShape }) .run() @@ -157,9 +157,9 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val zipLatest = b.add(ZipLatestWith((_: Int) + (_: Int), true)) - Source(1 to 2) ~> zipLatest.in0 + Source(1 to 2) ~> zipLatest.in0 Source.fromPublisher(upstreamProbe) ~> zipLatest.in1 - zipLatest.out ~> Sink.fromSubscriber(downstreamProbe) + zipLatest.out ~> Sink.fromSubscriber(downstreamProbe) ClosedShape }) .run() @@ -221,8 +221,8 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.create() { implicit b => val zip = b.add(ZipLatestWith(Person.apply _)) - Source.single("Caplin") ~> zip.in0 - Source.single("Capybara") ~> zip.in1 + Source.single("Caplin") ~> zip.in0 + Source.single("Capybara") ~> zip.in1 Source.fromPublisher(upstreamProbe).take(1) ~> zip.in2 zip.out ~> Sink.fromSubscriber(downstreamProbe) @@ -273,27 +273,27 @@ class GraphZipLatestWithSpec extends 
TwoStreamsSetup { // odd input ports will be Int, even input ports will be String val zip = b.add(ZipLatestWith(sum22)) - Source.single(1) ~> zip.in0 - Source.single(2).map(_.toString) ~> zip.in1 - Source.single(3) ~> zip.in2 - Source.single(4).map(_.toString) ~> zip.in3 - Source.single(5) ~> zip.in4 - Source.single(6).map(_.toString) ~> zip.in5 - Source.single(7) ~> zip.in6 - Source.single(8).map(_.toString) ~> zip.in7 - Source.single(9) ~> zip.in8 - Source.single(10).map(_.toString) ~> zip.in9 - Source.single(11) ~> zip.in10 - Source.single(12).map(_.toString) ~> zip.in11 - Source.single(13) ~> zip.in12 - Source.single(14).map(_.toString) ~> zip.in13 - Source.single(15) ~> zip.in14 - Source.single(16).map(_.toString) ~> zip.in15 - Source.single(17) ~> zip.in16 - Source.single(18).map(_.toString) ~> zip.in17 - Source.single(19) ~> zip.in18 - Source.single(20).map(_.toString) ~> zip.in19 - Source.single(21) ~> zip.in20 + Source.single(1) ~> zip.in0 + Source.single(2).map(_.toString) ~> zip.in1 + Source.single(3) ~> zip.in2 + Source.single(4).map(_.toString) ~> zip.in3 + Source.single(5) ~> zip.in4 + Source.single(6).map(_.toString) ~> zip.in5 + Source.single(7) ~> zip.in6 + Source.single(8).map(_.toString) ~> zip.in7 + Source.single(9) ~> zip.in8 + Source.single(10).map(_.toString) ~> zip.in9 + Source.single(11) ~> zip.in10 + Source.single(12).map(_.toString) ~> zip.in11 + Source.single(13) ~> zip.in12 + Source.single(14).map(_.toString) ~> zip.in13 + Source.single(15) ~> zip.in14 + Source.single(16).map(_.toString) ~> zip.in15 + Source.single(17) ~> zip.in16 + Source.single(18).map(_.toString) ~> zip.in17 + Source.single(19) ~> zip.in18 + Source.single(20).map(_.toString) ~> zip.in19 + Source.single(21) ~> zip.in20 Source.fromPublisher(upstreamProbe).map(_.toString) ~> zip.in21 zip.out ~> Sink.fromSubscriber(downstreamProbe) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala index 57d34a2bcb..09dfdf094e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala @@ -67,7 +67,7 @@ class GraphZipNSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zipN.in(0) Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + zipN.out ~> out ClosedShape }) @@ -94,7 +94,7 @@ class GraphZipNSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zipN.in(0) Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + zipN.out ~> out ClosedShape }) @@ -122,7 +122,7 @@ class GraphZipNSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zipN.in(0) Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + zipN.out ~> out ClosedShape }) @@ -149,7 +149,7 @@ class GraphZipNSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zipN.in(0) Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + zipN.out ~> out ClosedShape }) @@ -177,7 +177,7 @@ class GraphZipNSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zipN.in(0) Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + zipN.out ~> out ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala index d318e8ac9e..ccd592843b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala @@ -33,7 +33,7 @@ class GraphZipSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.create() { implicit b => val zip = b.add(Zip[Int, String]()) - Source(1 to 4) ~> zip.in0 + Source(1 to 4) ~> zip.in0 Source(List("A", "B", "C", "D", "E", "F")) ~> zip.in1 zip.out ~> Sink.fromSubscriber(probe) @@ -66,7 +66,7 @@ class GraphZipSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zip.in0 Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + zip.out ~> out ClosedShape }) @@ -93,7 +93,7 @@ class GraphZipSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zip.in0 Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + zip.out ~> out ClosedShape }) @@ -121,7 +121,7 @@ class GraphZipSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zip.in0 Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + zip.out ~> out ClosedShape }) @@ -148,7 +148,7 @@ class GraphZipSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zip.in0 Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + zip.out ~> out ClosedShape }) @@ -176,7 +176,7 @@ class GraphZipSpec extends TwoStreamsSetup { Source.fromPublisher(upstream1) ~> zip.in0 Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + zip.out ~> out ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala index 96fa1f376d..5cea5f8146 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala @@ -31,7 +31,7 @@ class GraphZipWithNSpec extends TwoStreamsSetup { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val zip = b.add(ZipWithN((_: immutable.Seq[Int]).sum)(2)) - Source(1 to 4) ~> zip.in(0) + Source(1 to 4) ~> zip.in(0) Source(10 to 40 by 10) ~> zip.in(1) zip.out ~> Sink.fromSubscriber(probe) @@ -61,7 +61,7 @@ class GraphZipWithNSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.create() { implicit b => val zip = b.add(ZipWithN((_: immutable.Seq[Int]).foldLeft(1)(_ / _))(2)) - Source(1 to 4) ~> zip.in(0) + Source(1 to 4) ~> zip.in(0) Source(-2 to 2) ~> zip.in(1) zip.out ~> Sink.fromSubscriber(probe) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala index 04c7df76e9..6c47a37800 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala @@ -30,7 +30,7 @@ class GraphZipWithSpec extends TwoStreamsSetup { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val zip = b.add(ZipWith((_: Int) + (_: Int))) - Source(1 to 4) ~> zip.in0 + Source(1 to 4) ~> zip.in0 Source(10 to 40 by 10) ~> zip.in1 zip.out ~> Sink.fromSubscriber(probe) @@ -60,7 +60,7 @@ class GraphZipWithSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.create() { implicit b => val zip = b.add(ZipWith[Int, Int, Int]((_: Int) / (_: Int))) - Source(1 to 4) ~> zip.in0 + Source(1 to 4) ~> zip.in0 Source(-2 to 2) ~> zip.in1 zip.out ~> Sink.fromSubscriber(probe) @@ -128,9 +128,9 @@ class GraphZipWithSpec extends TwoStreamsSetup { .fromGraph(GraphDSL.create() { implicit b => val zip = b.add(ZipWith(Person.apply _)) - Source.single("Caplin") ~> zip.in0 + Source.single("Caplin") 
~> zip.in0 Source.single("Capybara") ~> zip.in1 - Source.single(3) ~> zip.in2 + Source.single(3) ~> zip.in2 zip.out ~> Sink.fromSubscriber(probe) @@ -178,27 +178,27 @@ class GraphZipWithSpec extends TwoStreamsSetup { // odd input ports will be Int, even input ports will be String val zip = b.add(ZipWith(sum22)) - Source.single(1) ~> zip.in0 - Source.single(2).map(_.toString) ~> zip.in1 - Source.single(3) ~> zip.in2 - Source.single(4).map(_.toString) ~> zip.in3 - Source.single(5) ~> zip.in4 - Source.single(6).map(_.toString) ~> zip.in5 - Source.single(7) ~> zip.in6 - Source.single(8).map(_.toString) ~> zip.in7 - Source.single(9) ~> zip.in8 + Source.single(1) ~> zip.in0 + Source.single(2).map(_.toString) ~> zip.in1 + Source.single(3) ~> zip.in2 + Source.single(4).map(_.toString) ~> zip.in3 + Source.single(5) ~> zip.in4 + Source.single(6).map(_.toString) ~> zip.in5 + Source.single(7) ~> zip.in6 + Source.single(8).map(_.toString) ~> zip.in7 + Source.single(9) ~> zip.in8 Source.single(10).map(_.toString) ~> zip.in9 - Source.single(11) ~> zip.in10 + Source.single(11) ~> zip.in10 Source.single(12).map(_.toString) ~> zip.in11 - Source.single(13) ~> zip.in12 + Source.single(13) ~> zip.in12 Source.single(14).map(_.toString) ~> zip.in13 - Source.single(15) ~> zip.in14 + Source.single(15) ~> zip.in14 Source.single(16).map(_.toString) ~> zip.in15 - Source.single(17) ~> zip.in16 + Source.single(17) ~> zip.in16 Source.single(18).map(_.toString) ~> zip.in17 - Source.single(19) ~> zip.in18 + Source.single(19) ~> zip.in18 Source.single(20).map(_.toString) ~> zip.in19 - Source.single(21) ~> zip.in20 + Source.single(21) ~> zip.in20 Source.single(22).map(_.toString) ~> zip.in21 zip.out ~> Sink.fromSubscriber(probe) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala index 0b0fa78021..c2a10f0896 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala @@ -20,12 +20,12 @@ class HeadSinkSpec extends StreamSpec(""" "yield the first value for simple source" in { implicit val ec = system.dispatcher - //#head-operator-example + // #head-operator-example val source = Source(1 to 10) val result: Future[Int] = source.runWith(Sink.head) result.map(println) // 1 - //#head-operator-example + // #head-operator-example result.futureValue shouldEqual 1 } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala index 6392168835..aa2dd4c64b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala @@ -517,13 +517,13 @@ class HubSpec extends StreamSpec { "be able to use as round-robin router" in { val source = Source(0 until 10).runWith(PartitionHub.statefulSink(() => { - var n = 0L + var n = 0L - (info, _) => { - n += 1 - info.consumerIdByIdx((n % info.size).toInt) - } - }, startAfterNrOfConsumers = 2, bufferSize = 8)) + (info, _) => { + n += 1 + info.consumerIdByIdx((n % info.size).toInt) + } + }, startAfterNrOfConsumers = 2, bufferSize = 8)) val result1 = source.runWith(Sink.seq) val result2 = source.runWith(Sink.seq) result1.futureValue should ===(1 to 9 by 2) @@ -532,20 +532,20 @@ class HubSpec extends StreamSpec { "be able to use as sticky session router" in { val source = Source(List("usr-1", "usr-2", "usr-1", 
"usr-3")).runWith(PartitionHub.statefulSink(() => { - var sessions = Map.empty[String, Long] - var n = 0L + var sessions = Map.empty[String, Long] + var n = 0L - (info, elem) => { - sessions.get(elem) match { - case Some(id) if info.consumerIds.exists(_ == id) => id - case _ => - n += 1 - val id = info.consumerIdByIdx((n % info.size).toInt) - sessions = sessions.updated(elem, id) - id + (info, elem) => { + sessions.get(elem) match { + case Some(id) if info.consumerIds.exists(_ == id) => id + case _ => + n += 1 + val id = info.consumerIdByIdx((n % info.size).toInt) + sessions = sessions.updated(elem, id) + id + } } - } - }, startAfterNrOfConsumers = 2, bufferSize = 8)) + }, startAfterNrOfConsumers = 2, bufferSize = 8)) val result1 = source.runWith(Sink.seq) val result2 = source.runWith(Sink.seq) result1.futureValue should ===(List("usr-2")) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala index e6b961dac8..ba287e1f41 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala @@ -18,12 +18,12 @@ class LastSinkSpec extends StreamSpec with ScriptedTest { "A Flow with Sink.last" must { "yield the last value" in { - //#last-operator-example + // #last-operator-example val source = Source(1 to 10) val result: Future[Int] = source.runWith(Sink.last) result.map(println) // 10 - //#last-operator-example + // #last-operator-example result.futureValue shouldEqual 10 } @@ -55,12 +55,12 @@ class LastSinkSpec extends StreamSpec with ScriptedTest { } "yield None for empty stream" in { - //#lastOption-operator-example + // #lastOption-operator-example val source = Source.empty[Int] val result: Future[Option[Int]] = source.runWith(Sink.lastOption) result.map(println) // None - //#lastOption-operator-example + // #lastOption-operator-example result.futureValue shouldEqual None } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala index 81a2fc16bd..61845a4825 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala @@ -217,7 +217,7 @@ class LazyFlowSpec extends StreamSpec(""" val deferredMatVal = result._1 val list = result._2 list.failed.futureValue shouldBe a[TE] - //futureFlow's behaviour in case of mat failure (follows flatMapPrefix) + // futureFlow's behaviour in case of mat failure (follows flatMapPrefix) deferredMatVal.failed.futureValue shouldBe a[NeverMaterializedException] deferredMatVal.failed.futureValue.getCause shouldEqual TE("mat-failed") } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala index f3b6ee3370..6bbef73ebb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala @@ -23,9 +23,9 @@ class PublisherSinkSpec extends StreamSpec { val bcast = b.add(Broadcast[Int](2)) - Source(0 to 5) ~> bcast.in + Source(0 to 5) ~> bcast.in bcast.out(0).map(_ * 2) ~> p1.in - bcast.out(1) ~> p2.in + bcast.out(1) ~> p2.in ClosedShape }) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala index 4ef5dd9751..903726a041 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala @@ -130,9 +130,9 @@ class QueueSinkSpec extends StreamSpec { "fail future immediately if stream already canceled" in { val queue = Source.empty[Int].runWith(Sink.queue()) // race here because no way to observe that queue sink saw termination - awaitAssert({ + awaitAssert { queue.pull().failed.futureValue shouldBe a[StreamDetachedException] - }) + } } "timeout future when stream cannot provide data" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala index 5198ba4628..2d0e678ecd 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala @@ -211,7 +211,7 @@ class QueueSourceSpec extends StreamSpec { val s = TestSubscriber.manualProbe[Int]() val queue = Source.queue(1, OverflowStrategy.fail).to(Sink.fromSubscriber(s)).run() queue.watchCompletion().pipeTo(testActor) - queue.offer(1) //need to wait when first offer is done as initialization can be done in this moment + queue.offer(1) // need to wait when first offer is done as initialization can be done in this moment queue.offer(2) expectMsgClass(classOf[Status.Failure]) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala index a46ccd7970..264a2b601e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala @@ -170,14 +170,14 @@ class RetryFlowSpec extends StreamSpec(""" "allow retrying a successful element" in { class SomeContext - //#retry-success + // #retry-success val flow: FlowWithContext[Int, SomeContext, Int, SomeContext, NotUsed] = // ??? 
- //#retry-success + // #retry-success FlowWithContext.fromTuples[Int, SomeContext, Int, SomeContext, NotUsed](Flow.fromFunction { case (i, ctx) => i / 2 -> ctx }) - //#retry-success + // #retry-success val retryFlow: FlowWithContext[Int, SomeContext, Int, SomeContext, NotUsed] = RetryFlow.withBackoffAndContext( @@ -189,7 +189,7 @@ class RetryFlowSpec extends StreamSpec(""" case ((_, _), (result, ctx)) if result > 0 => Some(result -> ctx) case _ => None }) - //#retry-success + // #retry-success val (source, sink) = TestSource.probe[(Int, SomeContext)].via(retryFlow).toMat(TestSink.probe)(Keep.both).run() @@ -374,7 +374,8 @@ class RetryFlowSpec extends StreamSpec(""" externalIn.expectCancellation() } - "propagate error before the RetryFlow, while on retry spin" in new ConstructBench[Int, Int, Int]((v, _) => Some(v)) { + "propagate error before the RetryFlow, while on retry spin" in new ConstructBench[Int, Int, Int]((v, _) => + Some(v)) { externalOut.request(92) // spinning message externalIn.sendNext(1 -> 0) @@ -507,9 +508,7 @@ class RetryFlowSpec extends StreamSpec(""" } "allow more demand in inner flow (but never pass in more than one element into the retrying cycle)" in new AllSucceedBench[ - InData, - Ctx2, - OutData] { + InData, Ctx2, OutData] { externalOut.request(1) internalIn.expectRequest() shouldBe 1L internalOut.request(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala index 6e06e9f51d..bd78c597a2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala @@ -171,7 +171,7 @@ class ReverseArrowSpec extends StreamSpec { val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) s <~ f Source.empty ~> f - source ~> f + source ~> f ClosedShape }) .run(), @@ -185,7 +185,7 @@ class ReverseArrowSpec extends StreamSpec { val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) val src = b.add(source) Source.empty ~> f - src ~> f + src ~> f (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("no more inlets free") ClosedShape }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala index fe915c05f4..30643149af 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala @@ -19,7 +19,7 @@ class SeqSinkSpec extends StreamSpec(""" "Sink.toSeq" must { "return a Seq[T] from a Source" in { - val input = (1 to 6) + val input = 1 to 6 val future: Future[immutable.Seq[Int]] = Source(input).runWith(Sink.seq) val result: immutable.Seq[Int] = Await.result(future, remainingOrDefault) result should be(input.toSeq) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala index e8c301df75..8926f629af 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala @@ -36,14 +36,14 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with 
importing 1 module" in { val probes = Array.fill(3)(TestSubscriber.manualProbe[Int]()) val sink = Sink.fromGraph(GraphDSL.createGraph(Sink.fromSubscriber(probes(0))) { implicit b => s0 => val bcast = b.add(Broadcast[Int](3)) - bcast.out(0) ~> Flow[Int].filter(_ == 0) ~> s0.in + bcast.out(0) ~> Flow[Int].filter(_ == 0) ~> s0.in for (i <- 1 to 2) bcast.out(i).filter(_ == i) ~> Sink.fromSubscriber(probes(i)) SinkShape(bcast.in) }) @@ -54,7 +54,7 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 2 modules" in { @@ -76,7 +76,7 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 3 modules" in { @@ -99,7 +99,7 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "combine to many outputs with simplified API" in { @@ -257,13 +257,13 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "The reduce sink" must { "sum up 1 to 10 correctly" in { - //#reduce-operator-example + // #reduce-operator-example val source = Source(1 to 10) val result = source.runWith(Sink.reduce[Int]((a, b) => a + b)) result.map(println)(system.dispatcher) // will print // 55 - //#reduce-operator-example + // #reduce-operator-example assert(result.futureValue == (1 to 10).sum) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala index 9de0981469..1d0c5104ae 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala @@ -32,11 +32,11 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "produce exactly one element" in { implicit val ec = system.dispatcher - //#source-single + // #source-single val s: Future[immutable.Seq[Int]] = Source.single(1).runWith(Sink.seq) s.foreach(list => println(s"Collected elements: $list")) // prints: Collected elements: List(1) - //#source-single + // #source-single s.futureValue should ===(immutable.Seq(1)) @@ -175,14 +175,14 @@ class SourceSpec extends StreamSpec with DefaultTimeout { // compiler to check the correct materialized value of type = SourceQueueWithComplete[Int] available val combined1: Source[Int, BoundedSourceQueue[Int]] = - Source.combineMat(queueSource, intSeqSource)(Concat(_))(Keep.left) //Keep.left (i.e. preserve queueSource's materialized value) + Source.combineMat(queueSource, intSeqSource)(Concat(_))(Keep.left) // Keep.left (i.e. 
preserve queueSource's materialized value) val (queue1, sinkProbe1) = combined1.toMat(TestSink.probe[Int])(Keep.both).run() sinkProbe1.request(6) queue1.offer(10) queue1.offer(20) queue1.offer(30) - queue1.complete() //complete queueSource so that combined1 with `Concat` then pulls elements from intSeqSource + queue1.complete() // complete queueSource so that combined1 with `Concat` then pulls elements from intSeqSource sinkProbe1.expectNext(10) sinkProbe1.expectNext(20) sinkProbe1.expectNext(30) @@ -192,19 +192,19 @@ // compiler to check the correct materialized value of type = SourceQueueWithComplete[Int] available val combined2: Source[Int, BoundedSourceQueue[Int]] = - //queueSource to be the second of combined source - Source.combineMat(intSeqSource, queueSource)(Concat(_))(Keep.right) //Keep.right (i.e. preserve queueSource's materialized value) + // queueSource to be the second of combined source + Source.combineMat(intSeqSource, queueSource)(Concat(_))(Keep.right) // Keep.right (i.e. preserve queueSource's materialized value) val (queue2, sinkProbe2) = combined2.toMat(TestSink.probe[Int])(Keep.both).run() sinkProbe2.request(6) queue2.offer(10) queue2.offer(20) queue2.offer(30) - queue2.complete() //complete queueSource so that combined1 with `Concat` then pulls elements from queueSource - sinkProbe2.expectNext(1) //as intSeqSource iss the first in combined source, elements from intSeqSource come first + queue2.complete() // complete queueSource so that combined2 with `Concat` then pulls elements from queueSource + sinkProbe2.expectNext(1) // as intSeqSource is the first in combined source, elements from intSeqSource come first sinkProbe2.expectNext(2) sinkProbe2.expectNext(3) - sinkProbe2.expectNext(10) //after intSeqSource run out elements, queueSource elements come + sinkProbe2.expectNext(10) // after intSeqSource runs out of elements, queueSource elements come sinkProbe2.expectNext(20) sinkProbe2.expectNext(30) } @@ -271,7 +271,7 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "generate an unbounded fibonacci sequence" in { Source - .unfold((0, 1))({ case (a, b) => Some((b, a + b) -> a) }) + .unfold((0, 1)) { case (a, b) => Some((b, a + b) -> a) } .take(36) .runFold(List.empty[Int]) { case (xs, x) => x :: xs } .futureValue should ===(expected) @@ -338,24 +338,24 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "continuously generate the same sequence" in { val expected = Seq(1, 2, 3, 1, 2, 3, 1, 2, 3) - //#cycle + // #cycle Source .cycle(() => List(1, 2, 3).iterator) .grouped(9) .runWith(Sink.head) // This will produce the Seq(1, 2, 3, 1, 2, 3, 1, 2, 3) - //#cycle + // #cycle .futureValue should ===(expected) } "throw an exception in case of empty iterator" in { - //#cycle-error + // #cycle-error val empty = Iterator.empty Source .cycle(() => empty) .runWith(Sink.head) // This will return a failed future with an `IllegalArgumentException` - //#cycle-error + // #cycle-error .failed .futureValue shouldBe an[IllegalArgumentException] } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala index 79d3ac8a1c..e79cee39dc 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala @@ -155,7 +155,8 @@ class StageActorRefSpec extends StreamSpec with ImplicitSender { stageRef !
PoisonPill // should log a warning, and NOT stop the stage. val actorName = """StageActorRef-[\d+]""" - val expectedMsg = s"[PoisonPill|Kill] message sent to StageActorRef($actorName) will be ignored,since it is not a real Actor. " + + val expectedMsg = + s"[PoisonPill|Kill] message sent to StageActorRef($actorName) will be ignored,since it is not a real Actor. " + "Use a custom message type to communicate with it instead." expectMsgPF(1.second, expectedMsg) { case Logging.Warning(_, _, msg) => expectedMsg.r.pattern.matcher(msg.toString).matches() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala index c79c40f648..513771af8f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala @@ -37,9 +37,10 @@ class StreamConvertersSpec extends StreamSpec with DefaultTimeout { import scala.compat.java8.FunctionConverters._ def javaStreamInts = - IntStream.iterate(1, { (i: Int) => - i + 1 - }.asJava) + IntStream.iterate(1, + { (i: Int) => + i + 1 + }.asJava) "work with Java collections" in { val list = new java.util.LinkedList[Integer]() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala index d75fad50e5..a65b92154c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala @@ -28,7 +28,7 @@ class TakeLastSinkSpec extends StreamSpec { "return top three student based on GPA correctly" in { implicit val ex = system.dispatcher - //#takeLast-operator-example + // #takeLast-operator-example case class Student(name: String, gpa: Double) val students = List( @@ -56,7 +56,7 @@ class TakeLastSinkSpec extends StreamSpec { Name: Kendra, GPA: 4.2 */ - //#takeLast-operator-example + // #takeLast-operator-example result.futureValue shouldEqual students.takeRight(3) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala index 2f8395edc2..161396ca62 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala @@ -65,9 +65,9 @@ class TickSourceSpec extends StreamSpec { .fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val zip = b.add(Zip[Int, String]()) - Source(1 to 100) ~> zip.in0 + Source(1 to 100) ~> zip.in0 Source.tick(1.second, 1.second, "tick") ~> zip.in1 - zip.out ~> Flow[(Int, String)].map { case (n, _) => n } ~> Sink.fromSubscriber(c) + zip.out ~> Flow[(Int, String)].map { case (n, _) => n } ~> Sink.fromSubscriber(c) ClosedShape }) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala index 17313b4ae4..915d0fb530 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala @@ -371,12 +371,14 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val closed = Promise[Done]() Source 
.unfoldResourceAsync[String, Iterator[String]]({ () => - Future(Iterator("a", "b", "c")) - }, { m => - Future(if (m.hasNext) Some(m.next()) else None) - }, { _ => - closed.success(Done).future - }) + Future(Iterator("a", "b", "c")) + }, + { m => + Future(if (m.hasNext) Some(m.next()) else None) + }, + { _ => + closed.success(Done).future + }) .map(m => println(s"Elem=> $m")) .runWith(Sink.cancelled) @@ -387,10 +389,11 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val closeProbe = TestProbe() val probe = TestSubscriber.probe[Unit]() Source - .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), _ => Future.failed(TE("read failed")), { _ => - closeProbe.ref ! "closed" - Future.successful(Done) - }) + .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), _ => Future.failed(TE("read failed")), + { _ => + closeProbe.ref ! "closed" + Future.successful(Done) + }) .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.request(1L) @@ -402,10 +405,11 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val closeProbe = TestProbe() val probe = TestSubscriber.probe[Unit]() Source - .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), _ => throw TE("read failed"), { _ => - closeProbe.ref ! "closed" - Future.successful(Done) - }) + .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), _ => throw TE("read failed"), + { _ => + closeProbe.ref ! "closed" + Future.successful(Done) + }) .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.request(1L) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala index 91a8d19936..89a29d0d3d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala @@ -79,10 +79,11 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "continue when Strategy is Resume and exception happened" in { val p = Source - .unfoldResource[String, BufferedReader](() => newBufferedReader(), reader => { - val s = reader.readLine() - if (s != null && s.contains("b")) throw TE("") else Option(s) - }, reader => reader.close()) + .unfoldResource[String, BufferedReader](() => newBufferedReader(), + reader => { + val s = reader.readLine() + if (s != null && s.contains("b")) throw TE("") else Option(s) + }, reader => reader.close()) .withAttributes(supervisionStrategy(resumingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -100,10 +101,11 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "close and open stream again when Strategy is Restart" in { val p = Source - .unfoldResource[String, BufferedReader](() => newBufferedReader(), reader => { - val s = reader.readLine() - if (s != null && s.contains("b")) throw TE("") else Option(s) - }, reader => reader.close()) + .unfoldResource[String, BufferedReader](() => newBufferedReader(), + reader => { + val s = reader.readLine() + if (s != null && s.contains("b")) throw TE("") else Option(s) + }, reader => reader.close()) .withAttributes(supervisionStrategy(restartingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -122,10 +124,11 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val chunkSize = 50 val buffer 
= new Array[Char](chunkSize) val p = Source - .unfoldResource[ByteString, Reader](() => newBufferedReader(), reader => { - val s = reader.read(buffer) - if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None - }, reader => reader.close()) + .unfoldResource[ByteString, Reader](() => newBufferedReader(), + reader => { + val s = reader.read(buffer) + if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None + }, reader => reader.close()) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[ByteString]() @@ -218,7 +221,8 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val probe = Source .unfoldResource[Int, Int]( () => 23, // the best resource there is - _ => throw TE("failing read"), { _ => + _ => throw TE("failing read"), + { _ => closedCounter.incrementAndGet() if (closedCounter.get == 1) throw TE("boom") }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala index 620077af96..3769120817 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala @@ -24,12 +24,12 @@ class MaterializerStateSpec extends AkkaSpec() { Source.maybe[Int].map(_.toString).zipWithIndex.runWith(Sink.seq) awaitAssert({ - val snapshot = MaterializerState.streamSnapshots(mat).futureValue + val snapshot = MaterializerState.streamSnapshots(mat).futureValue - snapshot should have size (1) - snapshot.head.activeInterpreters should have size (1) - snapshot.head.activeInterpreters.head.logics should have size (4) // all 4 operators - }, remainingOrDefault) + snapshot should have size 1 + snapshot.head.activeInterpreters should have size 1 + snapshot.head.activeInterpreters.head.logics should have size 4 // all 4 operators + }, remainingOrDefault) } finally { mat.shutdown() } @@ -40,22 +40,23 @@ class MaterializerStateSpec extends AkkaSpec() { Source.future(promise.future).map(_.toString).zipWithIndex.runWith(Sink.seq) awaitAssert({ - val snapshot = MaterializerState.streamSnapshots(system).futureValue + val snapshot = MaterializerState.streamSnapshots(system).futureValue - snapshot should have size (1) - snapshot.head.activeInterpreters should have size (1) - snapshot.head.activeInterpreters.head.logics should have size (4) // all 4 operators - }, remainingOrDefault) + snapshot should have size 1 + snapshot.head.activeInterpreters should have size 1 + snapshot.head.activeInterpreters.head.logics should have size 4 // all 4 operators + }, remainingOrDefault) promise.success(1) } "snapshot a running stream that includes a TLSActor" in { Source.never - .via(Tcp(system).outgoingConnectionWithTls(InetSocketAddress.createUnresolved("akka.io", 443), () => { - val engine = SSLContext.getDefault.createSSLEngine("akka.io", 443) - engine.setUseClientMode(true) - engine - })) + .via(Tcp(system).outgoingConnectionWithTls(InetSocketAddress.createUnresolved("akka.io", 443), + () => { + val engine = SSLContext.getDefault.createSSLEngine("akka.io", 443) + engine.setUseClientMode(true) + engine + })) .runWith(Sink.seq) val snapshots = MaterializerState.streamSnapshots(system).futureValue @@ -73,11 +74,11 @@ class MaterializerStateSpec extends AkkaSpec() { .runWith(probe) out.requestNext("one") awaitAssert({ - val snapshot = MaterializerState.streamSnapshots(mat).futureValue - snapshot should have size (1) - 
snapshot.head.activeInterpreters should have size (1) - snapshot.head.activeInterpreters.head.stoppedLogics should have size (2) // Source.single and a detach - }, remainingOrDefault) + val snapshot = MaterializerState.streamSnapshots(mat).futureValue + snapshot should have size 1 + snapshot.head.activeInterpreters should have size 1 + snapshot.head.activeInterpreters.head.stoppedLogics should have size 2 // Source.single and a detach + }, remainingOrDefault) } finally { mat.shutdown() @@ -92,20 +93,21 @@ class MaterializerStateSpec extends AkkaSpec() { val graph = Flow.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ - val partition = b.add(Partition[String](4, { - case "green" => 0 - case "red" => 1 - case "blue" => 2 - case _ => 3 - })) + val partition = b.add(Partition[String](4, + { + case "green" => 0 + case "red" => 1 + case "blue" => 2 + case _ => 3 + })) val merge = b.add(Merge[String](4, eagerComplete = false)) val discard = b.add(Sink.ignore.async) val one = b.add(Source.single("purple")) - partition.out(0) ~> merge.in(0) + partition.out(0) ~> merge.in(0) partition.out(1).via(Flow[String].map(_.toUpperCase()).async) ~> merge.in(1) - partition.out(2).groupBy(2, identity).mergeSubstreams ~> merge.in(2) - partition.out(3) ~> discard + partition.out(2).groupBy(2, identity).mergeSubstreams ~> merge.in(2) + partition.out(3) ~> discard one ~> merge.in(3) diff --git a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala index 12ed0b49ff..1a0e6b847c 100644 --- a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala +++ b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala @@ -25,10 +25,11 @@ object ActorSourceSinkExample { case class Fail(ex: Exception) extends Protocol val source: Source[Protocol, ActorRef[Protocol]] = ActorSource.actorRef[Protocol](completionMatcher = { - case Complete => - }, failureMatcher = { - case Fail(ex) => ex - }, bufferSize = 8, overflowStrategy = OverflowStrategy.fail) + case Complete => + }, + failureMatcher = { + case Fail(ex) => ex + }, bufferSize = 8, overflowStrategy = OverflowStrategy.fail) val ref = source .collect { @@ -166,7 +167,7 @@ object ActorSourceSinkExample { onInitMessage = (responseActorRef: ActorRef[Ack]) => Init(responseActorRef), ackMessage = Ack, onCompleteMessage = Complete, - onFailureMessage = (exception) => Fail(exception)) + onFailureMessage = exception => Fail(exception)) Source.single("msg1").runWith(sink) // #actor-sink-ref-with-backpressure diff --git a/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala b/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala index eebc335e98..cad28d0360 100644 --- a/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala +++ b/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala @@ -24,13 +24,13 @@ import scala.concurrent.duration._ import scala.concurrent.{ Await, Future } object ActorFlowSpec { - //#ask-actor + // #ask-actor final case class Asking(s: String, replyTo: ActorRef[Reply]) final case class Reply(msg: String) final case class AskingWithStatus(s: String, replyTo: ActorRef[StatusReply[String]]) - //#ask-actor + // #ask-actor } class ActorFlowSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { @@ -143,15 +143,15 @@ class ActorFlowSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { } "produce asked elements in 
order" in { - //#ask-actor + // #ask-actor val ref = spawn(Behaviors.receiveMessage[Asking] { asking => asking.replyTo ! Reply(asking.s + "!!!") Behaviors.same }) - //#ask-actor + // #ask-actor - //#ask + // #ask implicit val timeout: Timeout = 1.second val askFlow: Flow[String, Reply, NotUsed] = @@ -163,7 +163,7 @@ class ActorFlowSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { val in: Future[immutable.Seq[String]] = Source(1 to 50).map(_.toString).via(askFlow).map(_.msg).runWith(Sink.seq) - //#ask + // #ask askFlowExplicit.map(identity) in.futureValue shouldEqual List.tabulate(51)(i => s"$i!!!").drop(1) diff --git a/akka-stream/src/main/scala-jdk-9/akka/stream/impl/JavaFlowAndRsConverters.scala b/akka-stream/src/main/scala-jdk-9/akka/stream/impl/JavaFlowAndRsConverters.scala index 5fcc9032d0..73aa322bba 100644 --- a/akka-stream/src/main/scala-jdk-9/akka/stream/impl/JavaFlowAndRsConverters.scala +++ b/akka-stream/src/main/scala-jdk-9/akka/stream/impl/JavaFlowAndRsConverters.scala @@ -7,7 +7,7 @@ package akka.stream.impl import java.util.concurrent.Flow import JavaFlowAndRsConverters.Implicits._ -import org.{reactivestreams => rs} +import org.{ reactivestreams => rs } import akka.annotation.InternalApi @@ -97,31 +97,35 @@ private[akka] object JavaFlowAndRsConverters { } final def asJava[T, R](p: rs.Processor[T, R]): Flow.Processor[T, R] = p match { - case null => null // null remains null + case null => null // null remains null case adapter: JavaFlowProcessorToRsAdapter[T, R] => adapter.delegate // unwrap adapter instead of wrapping again - case _ => new RsProcessorToJavaFlowAdapter[T, R](p) + case _ => new RsProcessorToJavaFlowAdapter[T, R](p) } final def asRs[T, R](p: Flow.Processor[T, R]): rs.Processor[T, R] = p match { - case null => null // null remains null + case null => null // null remains null case adapter: RsProcessorToJavaFlowAdapter[T, R] => adapter.delegate // unwrap adapter instead of wrapping again - case _ => new JavaFlowProcessorToRsAdapter[T, R](p) + case _ => new JavaFlowProcessorToRsAdapter[T, R](p) } } /** INTERNAL API: Adapters are not meant to be touched directly */ -@InternalApi private[akka] final class JavaFlowPublisherToRsAdapter[T](val delegate: Flow.Publisher[T]) extends rs.Publisher[T] { +@InternalApi private[akka] final class JavaFlowPublisherToRsAdapter[T]( + val delegate: Flow.Publisher[T]) extends rs.Publisher[T] { override def subscribe(rsSubscriber: rs.Subscriber[_ >: T]): Unit = delegate.subscribe(rsSubscriber.asJava) } + /** INTERNAL API: Adapters are not meant to be touched directly */ -@InternalApi private[akka] final class RsPublisherToJavaFlowAdapter[T](val delegate: rs.Publisher[T]) extends Flow.Publisher[T] { +@InternalApi private[akka] final class RsPublisherToJavaFlowAdapter[T]( + val delegate: rs.Publisher[T]) extends Flow.Publisher[T] { override def subscribe(javaSubscriber: Flow.Subscriber[_ >: T]): Unit = delegate.subscribe(javaSubscriber.asRs) } /** INTERNAL API: Adapters are not meant to be touched directly */ -@InternalApi private[akka] final class RsSubscriberToJavaFlowAdapter[T](val delegate: rs.Subscriber[T]) extends Flow.Subscriber[T] { +@InternalApi private[akka] final class RsSubscriberToJavaFlowAdapter[T]( + val delegate: rs.Subscriber[T]) extends Flow.Subscriber[T] { override def onError(t: Throwable): Unit = delegate.onError(t) @@ -134,8 +138,10 @@ private[akka] object JavaFlowAndRsConverters { override def onSubscribe(s: Flow.Subscription): Unit = delegate.onSubscribe(s.asRs) } + /** INTERNAL API: Adapters are 
not meant to be touched directly */ -@InternalApi private[akka] final class JavaFlowSubscriberToRsAdapter[T](val delegate: Flow.Subscriber[T]) extends rs.Subscriber[T] { +@InternalApi private[akka] final class JavaFlowSubscriberToRsAdapter[T]( + val delegate: Flow.Subscriber[T]) extends rs.Subscriber[T] { override def onError(t: Throwable): Unit = delegate.onError(t) @@ -150,20 +156,24 @@ private[akka] object JavaFlowAndRsConverters { } /** INTERNAL API: Adapters are not meant to be touched directly */ -@InternalApi private[akka] final class RsSubscriptionToJavaFlowAdapter(val delegate: rs.Subscription) extends Flow.Subscription { - override def cancel(): Unit = delegate.cancel() - - override def request(n: Long): Unit = delegate.request(n) -} -/** INTERNAL API: Adapters are not meant to be touched directly */ -@InternalApi private[akka] final class JavaFlowSubscriptionToRsAdapter(val delegate: Flow.Subscription) extends rs.Subscription { +@InternalApi private[akka] final class RsSubscriptionToJavaFlowAdapter( + val delegate: rs.Subscription) extends Flow.Subscription { override def cancel(): Unit = delegate.cancel() override def request(n: Long): Unit = delegate.request(n) } /** INTERNAL API: Adapters are not meant to be touched directly */ -@InternalApi private[akka] final class RsProcessorToJavaFlowAdapter[T, R](val delegate: rs.Processor[T, R]) extends Flow.Processor[T, R] { +@InternalApi private[akka] final class JavaFlowSubscriptionToRsAdapter( + val delegate: Flow.Subscription) extends rs.Subscription { + override def cancel(): Unit = delegate.cancel() + + override def request(n: Long): Unit = delegate.request(n) +} + +/** INTERNAL API: Adapters are not meant to be touched directly */ +@InternalApi private[akka] final class RsProcessorToJavaFlowAdapter[T, R]( + val delegate: rs.Processor[T, R]) extends Flow.Processor[T, R] { override def onError(t: Throwable): Unit = delegate.onError(t) @@ -179,8 +189,10 @@ private[akka] object JavaFlowAndRsConverters { override def subscribe(javaSubscriber: Flow.Subscriber[_ >: R]): Unit = delegate.subscribe(javaSubscriber.asRs) } + /** INTERNAL API: Adapters are not meant to be touched directly */ -@InternalApi private[akka] final class JavaFlowProcessorToRsAdapter[T, R](val delegate: Flow.Processor[T, R]) extends rs.Processor[T, R] { +@InternalApi private[akka] final class JavaFlowProcessorToRsAdapter[T, R]( + val delegate: Flow.Processor[T, R]) extends rs.Processor[T, R] { override def onError(t: Throwable): Unit = delegate.onError(t) diff --git a/akka-stream/src/main/scala-jdk-9/akka/stream/scaladsl/JavaFlowSupport.scala b/akka-stream/src/main/scala-jdk-9/akka/stream/scaladsl/JavaFlowSupport.scala index 2ba449bab5..a9b7d9f011 100644 --- a/akka-stream/src/main/scala-jdk-9/akka/stream/scaladsl/JavaFlowSupport.scala +++ b/akka-stream/src/main/scala-jdk-9/akka/stream/scaladsl/JavaFlowSupport.scala @@ -4,7 +4,7 @@ package akka.stream.scaladsl -import java.util.{concurrent => juc} +import java.util.{ concurrent => juc } import scala.annotation.unchecked.uncheckedVariance @@ -38,10 +38,10 @@ object JavaFlowSupport { * (which carries the same semantics, however existed before RS's inclusion in Java 9). 
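// Editor's sketch (not part of the patch): the asJava/asRs converters above unwrap
// their own adapters instead of wrapping twice, so a round trip returns the original
// instance. Assuming some org.reactivestreams.Processor `p` (internal API, shown for
// illustration only):
//
//   val j: java.util.concurrent.Flow.Processor[Int, Int] = JavaFlowAndRsConverters.asJava(p)
//   JavaFlowAndRsConverters.asRs(j) eq p // true: asRs returns the adapter's delegate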
*/ final - //#fromPublisher + // #fromPublisher def fromPublisher[T](publisher: java.util.concurrent.Flow.Publisher[T]): Source[T, NotUsed] = - //#fromPublisher - scaladsl.Source.fromPublisher(publisher.asRs) + // #fromPublisher + scaladsl.Source.fromPublisher(publisher.asRs) /** * Creates a `Source` that is materialized as a [[java.util.concurrent.Flow.Subscriber]] @@ -50,9 +50,9 @@ object JavaFlowSupport { * (which carries the same semantics, however existed before RS's inclusion in Java 9). */ final - //#asSubscriber + // #asSubscriber def asSubscriber[T]: Source[T, java.util.concurrent.Flow.Subscriber[T]] = - //#asSubscriber + // #asSubscriber scaladsl.Source.asSubscriber[T].mapMaterializedValue(_.asJava) } @@ -84,17 +84,18 @@ object JavaFlowSupport { * * @return A [[RunnableGraph]] that materializes to a Processor when run() is called on it. */ - def toProcessor[In, Out, Mat](self: Flow[In, Out, Mat]): RunnableGraph[juc.Flow.Processor[In @uncheckedVariance, Out @uncheckedVariance]] = + def toProcessor[In, Out, Mat]( + self: Flow[In, Out, Mat]): RunnableGraph[juc.Flow.Processor[In @uncheckedVariance, Out @uncheckedVariance]] = Source.asSubscriber[In].via(self) .toMat(Sink.asPublisher[Out](fanout = false))(Keep.both) .mapMaterializedValue { case (sub, pub) => new juc.Flow.Processor[In, Out] { - override def onError(t: Throwable): Unit = sub.onError(t) - override def onSubscribe(s: juc.Flow.Subscription): Unit = sub.onSubscribe(s) - override def onComplete(): Unit = sub.onComplete() - override def onNext(t: In): Unit = sub.onNext(t) - override def subscribe(s: juc.Flow.Subscriber[_ >: Out]): Unit = pub.subscribe(s) - } + override def onError(t: Throwable): Unit = sub.onError(t) + override def onSubscribe(s: juc.Flow.Subscription): Unit = sub.onSubscribe(s) + override def onComplete(): Unit = sub.onComplete() + override def onNext(t: In): Unit = sub.onNext(t) + override def subscribe(s: juc.Flow.Subscriber[_ >: Out]): Unit = pub.subscribe(s) + } } } @@ -102,6 +103,7 @@ object JavaFlowSupport { * [[akka.stream.scaladsl.Sink]] factories operating with `java.util.concurrent.Flow.*` interfaces. */ object Sink { + /** * A `Sink` that materializes into a [[java.util.concurrent.Flow.Publisher]]. 
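// Editor's sketch of the JavaFlowSupport factories being reformatted here, assuming a
// JDK 9+ classpath and some `somePublisher: java.util.concurrent.Flow.Publisher[Int]`:
//
//   import akka.stream.scaladsl.{ JavaFlowSupport, Sink }
//   JavaFlowSupport.Source
//     .fromPublisher(somePublisher)
//     .runWith(Sink.foreach(println)) // consume the JDK Flow publisher as an Akka Source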
* diff --git a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala index d5a797fb98..c7d980e081 100644 --- a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala @@ -867,11 +867,12 @@ object StreamSubscriptionTimeoutSettings { */ def apply(config: Config): StreamSubscriptionTimeoutSettings = { val c = config.getConfig("subscription-timeout") - StreamSubscriptionTimeoutSettings(mode = toRootLowerCase(c.getString("mode")) match { - case "no" | "off" | "false" | "noop" => NoopTermination - case "warn" => WarnTermination - case "cancel" => CancelTermination - }, timeout = c.getDuration("timeout", TimeUnit.MILLISECONDS).millis) + StreamSubscriptionTimeoutSettings( + mode = toRootLowerCase(c.getString("mode")) match { + case "no" | "off" | "false" | "noop" => NoopTermination + case "warn" => WarnTermination + case "cancel" => CancelTermination + }, timeout = c.getDuration("timeout", TimeUnit.MILLISECONDS).millis) } } diff --git a/akka-stream/src/main/scala/akka/stream/Attributes.scala b/akka-stream/src/main/scala/akka/stream/Attributes.scala index c1d4515a42..186dbc2275 100644 --- a/akka-stream/src/main/scala/akka/stream/Attributes.scala +++ b/akka-stream/src/main/scala/akka/stream/Attributes.scala @@ -168,7 +168,8 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) { concatNames(i, null, b.append(first).append('-').append(n)) } else concatNames(i, n, null) case _ => concatNames(i, first, buf) - } else if (buf eq null) first + } + else if (buf eq null) first else buf.toString Option(concatNames(attributeList.reverseIterator, null, null)) @@ -311,18 +312,19 @@ object Attributes { * for debugging. Included in the default toString of GraphStageLogic if present */ final class SourceLocation(lambda: AnyRef) extends Attribute { - lazy val locationName: String = try { - val locationName = LineNumbers(lambda) match { - case LineNumbers.NoSourceInfo => "unknown" - case LineNumbers.UnknownSourceFormat(_) => "unknown" - case LineNumbers.SourceFile(filename) => filename - case LineNumbers.SourceFileLines(filename, from, _) => - s"$filename:$from" + lazy val locationName: String = + try { + val locationName = LineNumbers(lambda) match { + case LineNumbers.NoSourceInfo => "unknown" + case LineNumbers.UnknownSourceFormat(_) => "unknown" + case LineNumbers.SourceFile(filename) => filename + case LineNumbers.SourceFileLines(filename, from, _) => + s"$filename:$from" + } + s"${lambda.getClass.getPackage.getName}-$locationName" + } catch { + case NonFatal(_) => "unknown" // location is not critical so give up without failing } - s"${lambda.getClass.getPackage.getName}-$locationName" - } catch { - case NonFatal(_) => "unknown" // location is not critical so give up without failing - } override def toString: String = locationName } @@ -562,10 +564,10 @@ object Attributes { * nested flow materialization. * This applies to [[akka.stream.scaladsl.FlowOps.flatMapPrefix]], [[akka.stream.scaladsl.Flow.futureFlow]] and derived operators. 
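// Editor's note: StreamSubscriptionTimeoutSettings.apply(config) above reads a
// "subscription-timeout" section; a matching HOCON block would look like the sketch
// below (the akka.stream.materializer path is assumed from Akka's reference.conf):
//
//   akka.stream.materializer.subscription-timeout {
//     mode = cancel   # accepted values: no | off | false | noop | warn | cancel
//     timeout = 5s    # read via getDuration(..., MILLISECONDS)
//   }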
*/ - val EagerCancellation - : NestedMaterializationCancellationPolicy = new NestedMaterializationCancellationPolicy(false) { - override def toString: String = "EagerCancellation" - } + val EagerCancellation: NestedMaterializationCancellationPolicy = + new NestedMaterializationCancellationPolicy(false) { + override def toString: String = "EagerCancellation" + } /** * A [[NestedMaterializationCancellationPolicy]] that configures graph stages @@ -676,7 +678,6 @@ object Attributes { * * Configures `log()` operator log-levels to be used when logging. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. - * */ def createLogLevels( onElement: Logging.LogLevel, @@ -689,7 +690,6 @@ object Attributes { * * Configures `log()` operator log-levels to be used when logging onElement. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. - * */ def createLogLevels(onElement: Logging.LogLevel): Attributes = logLevels(onElement) @@ -765,7 +765,6 @@ object ActorAttributes { * * Configures `log()` operator log-levels to be used when logging. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. - * */ def createLogLevels( onElement: Logging.LogLevel, @@ -778,7 +777,6 @@ object ActorAttributes { * * Configures `log()` operator log-levels to be used when logging onElement. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. - * */ def createLogLevels(onElement: Logging.LogLevel): Attributes = logLevels(onElement) @@ -895,7 +893,8 @@ object ActorAttributes { object StreamRefAttributes { import Attributes._ - /** Attributes specific to stream refs. + /** + * Attributes specific to stream refs. * * Not for user extension. */ diff --git a/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala b/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala index ac0279e484..ecdd23f436 100644 --- a/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala +++ b/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala @@ -13,7 +13,7 @@ import scala.collection.immutable "2.5.5") class FanInShape1N[-T0, -T1, +O](val n: Int, _init: FanInShape.Init[O]) extends FanInShape[O](_init) { - //ports get added to `FanInShape.inlets` as a side-effect of calling `newInlet` + // ports get added to `FanInShape.inlets` as a side-effect of calling `newInlet` val in0: Inlet[T0 @uncheckedVariance] = newInlet[T0]("in0") for (i <- 1 until n) newInlet[T1](s"in$i") @@ -33,8 +33,8 @@ class FanInShape1N[-T0, -T1, +O](val n: Int, _init: FanInShape.Init[O]) extends // cannot deprecate a lazy val because of genjavadoc problem https://github.com/typesafehub/genjavadoc/issues/85 private lazy val _in1Seq: immutable.IndexedSeq[Inlet[T1 @uncheckedVariance]] = - inlets.tail //head is in0 - .toIndexedSeq.asInstanceOf[immutable.IndexedSeq[Inlet[T1]]] + inlets.tail // head is in0 + .toIndexedSeq.asInstanceOf[immutable.IndexedSeq[Inlet[T1]]] def in(n: Int): Inlet[T1 @uncheckedVariance] = { require(n > 0, "n must be > 0") diff --git a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala index ef0fc71638..1be17ae80a 100644 --- a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala +++ b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala @@ -23,7 +23,6 @@ import akka.stream.stage._ * to that materialized Flow itself. * * Creates a [[SharedKillSwitch]] that can be used to externally control the completion of various streams. 
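// Editor's sketch of the SharedKillSwitch usage described above (names assumed):
//
//   import akka.stream.KillSwitches
//   import akka.stream.scaladsl.{ Sink, Source }
//   val switch = KillSwitches.shared("example")
//   Source.repeat(1).via(switch.flow).runWith(Sink.ignore)
//   switch.shutdown() // completes every stream that runs through switch.flow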
- * */ object KillSwitches { @@ -109,24 +108,28 @@ val logic = new KillableGraphStageLogic(promise.future, shape) { - setHandler(shape.in1, new InHandler { - override def onPush(): Unit = push(shape.out1, grab(shape.in1)) - override def onUpstreamFinish(): Unit = complete(shape.out1) - override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out1, ex) - }) - setHandler(shape.in2, new InHandler { - override def onPush(): Unit = push(shape.out2, grab(shape.in2)) - override def onUpstreamFinish(): Unit = complete(shape.out2) - override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out2, ex) - }) - setHandler(shape.out1, new OutHandler { - override def onPull(): Unit = pull(shape.in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in1, cause) - }) - setHandler(shape.out2, new OutHandler { - override def onPull(): Unit = pull(shape.in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in2, cause) - }) + setHandler(shape.in1, + new InHandler { + override def onPush(): Unit = push(shape.out1, grab(shape.in1)) + override def onUpstreamFinish(): Unit = complete(shape.out1) + override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out1, ex) + }) + setHandler(shape.in2, + new InHandler { + override def onPush(): Unit = push(shape.out2, grab(shape.in2)) + override def onUpstreamFinish(): Unit = complete(shape.out2) + override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out2, ex) + }) + setHandler(shape.out1, + new OutHandler { + override def onPull(): Unit = pull(shape.in1) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in1, cause) + }) + setHandler(shape.out2, + new OutHandler { + override def onPull(): Unit = pull(shape.in2) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in2, cause) + }) } diff --git a/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala b/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala index 0c2b3c03ed..3ae5099b0f 100644 --- a/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala +++ b/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala @@ -14,7 +14,7 @@ sealed abstract class QueueOfferResult { /** * Return true if the element was already enqueued, otherwise false.
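// Editor's sketch of reacting to the isEnqueued contract documented above, assuming
// `queue` was materialized by Source.queue and an implicit ExecutionContext is in scope:
//
//   queue.offer(element).map { result =>
//     if (result.isEnqueued) println("accepted")
//     else println(s"not enqueued: $result") // Dropped, Failure(_) or QueueClosed
//   }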
- * */ + */ def isEnqueued: Boolean } diff --git a/akka-stream/src/main/scala/akka/stream/Shape.scala b/akka-stream/src/main/scala/akka/stream/Shape.scala index 569bd15253..60dd9eafb3 100644 --- a/akka-stream/src/main/scala/akka/stream/Shape.scala +++ b/akka-stream/src/main/scala/akka/stream/Shape.scala @@ -223,7 +223,7 @@ abstract class Shape { private def nonCorrespondingMessage(s: Shape) = s"The inlets [${s.inlets.mkString(", ")}] and outlets [${s.outlets.mkString(", ")}] must correspond to the inlets [${inlets - .mkString(", ")}] and outlets [${outlets.mkString(", ")}]" + .mkString(", ")}] and outlets [${outlets.mkString(", ")}]" } /** @@ -346,7 +346,7 @@ final case class BidiShape[-In1, +Out1, -In2, +Out2]( in2: Inlet[In2 @uncheckedVariance], out2: Outlet[Out2 @uncheckedVariance]) extends Shape { - //#implementation-details-elided + // #implementation-details-elided override val inlets: immutable.Seq[Inlet[_]] = in1 :: in2 :: Nil override val outlets: immutable.Seq[Outlet[_]] = out1 :: out2 :: Nil @@ -358,7 +358,7 @@ final case class BidiShape[-In1, +Out1, -In2, +Out2]( override def deepCopy(): BidiShape[In1, Out1, In2, Out2] = BidiShape(in1.carbonCopy(), out1.carbonCopy(), in2.carbonCopy(), out2.carbonCopy()) - //#implementation-details-elided + // #implementation-details-elided } //#bidi-shape object BidiShape { diff --git a/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala b/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala index 5f9aa662bd..06edf5e53a 100644 --- a/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala +++ b/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala @@ -20,7 +20,7 @@ object UniformFanInShape { class UniformFanInShape[-T, +O](val n: Int, _init: FanInShape.Init[O]) extends FanInShape[O](_init) { - //ports get added to `FanInShape.inlets` as a side-effect of calling `newInlet` + // ports get added to `FanInShape.inlets` as a side-effect of calling `newInlet` for (i <- 0 until n) newInlet[T](s"in$i") def this(n: Int) = this(n, FanInShape.Name[O]("UniformFanIn")) diff --git a/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala b/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala index 6bc1f63a0e..30eb0cfbb0 100644 --- a/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala +++ b/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala @@ -14,7 +14,7 @@ object UniformFanOutShape { class UniformFanOutShape[-I, +O](n: Int, _init: FanOutShape.Init[I @uncheckedVariance]) extends FanOutShape[I](_init) { - //initialize by side-effect + // initialize by side-effect for (i <- 0 until n) newOutlet[O](s"out$i") def this(n: Int) = this(n, FanOutShape.Name[I]("UniformFanOut")) diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala index 2f548c6ce3..9e4031d3e8 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala @@ -86,14 +86,15 @@ import akka.annotation.InternalApi private def reportSubscribeFailure(subscriber: Subscriber[_ >: T]): Unit = try shutdownReason match { - case Some(_: SpecViolation) => // ok, not allowed to call onError - case Some(e) => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnError(subscriber, e) - case None => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnComplete(subscriber) - } catch { + case Some(_: SpecViolation) => // ok, not allowed to call onError + 
case Some(e) => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnError(subscriber, e) + case None => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnComplete(subscriber) + } + catch { case _: SpecViolation => // nothing to do } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala index faf5a76923..81b3081cc6 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala @@ -53,7 +53,7 @@ import java.util if (buffer.size() == maxBuffer) tryPull(in) dequeueAndSend() } - case _ => //ignore all other messages + case _ => // ignore all other messages } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala index 4fcd003d4c..ab57f08f6a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala @@ -34,12 +34,12 @@ final private[akka] class ActorRefSinkStage[T]( var completionSignalled = false override def preStart(): Unit = { - getStageActor({ + getStageActor { case (_, Terminated(`ref`)) => completeStage() case msg => log.error("Unexpected message to stage actor {}", msg.getClass) - }).watch(ref) + }.watch(ref) pull(in) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala index 68ba75e4e2..d9ace0bf65 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala @@ -145,11 +145,12 @@ private object ActorRefSource { } } - setHandler(out, new OutHandler { - override def onPull(): Unit = { - tryPush() - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + tryPush() + } + }) } (stage, stage.ref) diff --git a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala index ad777371cd..0822848b5c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala @@ -122,7 +122,7 @@ import akka.util.ByteString private def skipToNextObject(bufSize: Int): Unit = while (pos != -1 && pos < bufSize && pos < maximumObjectLength && depth == 0) { - val outer = outerChars(buffer(pos) & 0xff) + val outer = outerChars(buffer(pos) & 0xFF) start += outer & 1 depth = (outer & 2) >> 1 diff --git a/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala b/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala index b4a71109b4..c98c17b60a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala @@ -40,26 +40,28 @@ import akka.stream.stage._ } override def onPull(): Unit = { - val source = try { - sourceFactory() - } catch { - case NonFatal(ex) => - matPromise.tryFailure(ex) - throw ex - } + val source = + try { + sourceFactory() + } catch { + case NonFatal(ex) => + matPromise.tryFailure(ex) + throw ex + } val subSink = new SubSinkInlet[T]("LazySource") subSink.pull() - setHandler(out, new OutHandler { - override def onPull(): Unit = { - subSink.pull() - } + setHandler(out, + new OutHandler { + override def onPull(): 
Unit = { + subSink.pull() + } - override def onDownstreamFinish(cause: Throwable): Unit = { - subSink.cancel(cause) - completeStage() - } - }) + override def onDownstreamFinish(cause: Throwable): Unit = { + subSink.cancel(cause) + completeStage() + } + }) subSink.setHandler(new InHandler { override def onPush(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala index 84c15a2df5..4b91ba55e3 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala @@ -834,7 +834,7 @@ private final case class SavedIslandData( else s"out port id [$missingHandlerIdx]" } throw new IllegalStateException(s"No handler defined in stage [${logic.toString}] for $portLabel." + - " All inlets and outlets must be assigned a handler with setHandler in the constructor of your graph stage logic.") + " All inlets and outlets must be assigned a handler with setHandler in the constructor of your graph stage logic.") } override def toString: String = "GraphStagePhase" diff --git a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala index 99bd10a750..0f2e378505 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala @@ -131,9 +131,10 @@ import akka.stream.SubscriptionWithCancelException if (subscription eq null) throw new IllegalStateException("Subscription must be not null on cancel() call, rule 1.3") try subscription match { - case s: SubscriptionWithCancelException => s.cancel(cause) - case s => s.cancel() - } catch { + case s: SubscriptionWithCancelException => s.cancel(cause) + case s => s.cancel() + } + catch { case NonFatal(t) => throw new SignalThrewException("It is illegal to throw exceptions from cancel(), rule 3.15", t) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala b/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala index 5d409df4a7..d73db64f33 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala @@ -112,11 +112,12 @@ import akka.util.OptionVal } }) - setHandler(externalOut, new OutHandler { - override def onPull(): Unit = - // external demand - if (!hasBeenPulled(internalIn)) pull(internalIn) - }) + setHandler(externalOut, + new OutHandler { + override def onPull(): Unit = + // external demand + if (!hasBeenPulled(internalIn)) pull(internalIn) + }) private def pushInternal(element: In): Unit = { push(internalOut, element) diff --git a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala index b0262a7130..9fc4fc6281 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala @@ -155,34 +155,34 @@ import akka.util.ccompat._ override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { val p: Promise[immutable.Seq[T]] = Promise() (new GraphStageLogic(shape) with InHandler { - private[this] val buffer = mutable.Queue.empty[T] - private[this] var count = 0 + private[this] val buffer = mutable.Queue.empty[T] + private[this] var count = 0 - 
override def preStart(): Unit = pull(in) + override def preStart(): Unit = pull(in) - override def onPush(): Unit = { - buffer.enqueue(grab(in)) - if (count < n) - count += 1 - else - buffer.dequeue() - pull(in) - } + override def onPush(): Unit = { + buffer.enqueue(grab(in)) + if (count < n) + count += 1 + else + buffer.dequeue() + pull(in) + } - override def onUpstreamFinish(): Unit = { - val elements = buffer.toList - buffer.clear() - p.trySuccess(elements) - completeStage() - } + override def onUpstreamFinish(): Unit = { + val elements = buffer.toList + buffer.clear() + p.trySuccess(elements) + completeStage() + } - override def onUpstreamFailure(ex: Throwable): Unit = { - p.tryFailure(ex) - failStage(ex) - } + override def onUpstreamFailure(ex: Throwable): Unit = { + p.tryFailure(ex) + failStage(ex) + } - setHandler(in, this) - }, p.future) + setHandler(in, this) + }, p.future) } override def toString: String = "TakeLastStage" @@ -201,29 +201,29 @@ import akka.util.ccompat._ override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { val p: Promise[Option[T]] = Promise() (new GraphStageLogic(shape) with InHandler { - override def preStart(): Unit = pull(in) + override def preStart(): Unit = pull(in) - def onPush(): Unit = { - p.trySuccess(Option(grab(in))) - completeStage() - } + def onPush(): Unit = { + p.trySuccess(Option(grab(in))) + completeStage() + } - override def onUpstreamFinish(): Unit = { - p.trySuccess(None) - completeStage() - } + override def onUpstreamFinish(): Unit = { + p.trySuccess(None) + completeStage() + } - override def onUpstreamFailure(ex: Throwable): Unit = { - p.tryFailure(ex) - failStage(ex) - } + override def onUpstreamFailure(ex: Throwable): Unit = { + p.tryFailure(ex) + failStage(ex) + } - override def postStop(): Unit = { - if (!p.isCompleted) p.failure(new AbruptStageTerminationException(this)) - } + override def postStop(): Unit = { + if (!p.isCompleted) p.failure(new AbruptStageTerminationException(this)) + } - setHandler(in, this) - }, p.future) + setHandler(in, this) + }, p.future) } override def toString: String = "HeadOptionStage" @@ -319,7 +319,7 @@ import akka.util.ccompat._ if (currentRequests.isFull) pullPromise.failure( new IllegalStateException(s"Too many concurrent pulls. Specified maximum is $maxConcurrentPulls. 
" + - "You have to wait for one previous future to be resolved to send another request")) + "You have to wait for one previous future to be resolved to send another request")) else if (buffer.isEmpty) currentRequests.enqueue(pullPromise) else { if (buffer.used == maxBuffer) tryPull(in) @@ -332,7 +332,7 @@ import akka.util.ccompat._ val e = buffer.dequeue() promise.complete(e) e match { - case Success(_: Some[_]) => //do nothing + case Success(_: Some[_]) => // do nothing case Success(None) => completeStage() case Failure(t) => failStage(t) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala index 13306332a8..b0f5da053c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala @@ -69,7 +69,6 @@ import akka.util.OptionVal * downstream and upstream, this needs an atomic state machine which looks a * little like this: * - * * +--------+ (2) +---------------+ * | null +------------>+ Subscriber | * +---+----+ +-----+---------+ @@ -92,7 +91,6 @@ import akka.util.OptionVal * | Publisher +-----> | Inert | | (5, *) * +--------------+ +---------------+ <-- * - * * The idea is to keep the major state in only one atomic reference. The actions * that can happen are: * @@ -324,10 +322,12 @@ import akka.util.OptionVal if (!compareAndSet(x, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec() case s: Subscriber[_] => try s.onError(ex) - catch { case NonFatal(_) => } finally set(Inert) + catch { case NonFatal(_) => } + finally set(Inert) case Both(s) => try s.onError(ex) - catch { case NonFatal(_) => } finally set(Inert) + catch { case NonFatal(_) => } + finally set(Inert) case _ => // spec violation or cancellation race, but nothing we can do } rec() diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala b/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala index 42e76c9c32..775c03c324 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala @@ -122,14 +122,15 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff // if we are at end-of-stream and have nothing more to read we complete now rather than after the next `requestMore` if ((eos ne NotReached) && buffer.count(subscription) == 0) Long.MinValue else 0 } else if (buffer.count(subscription) > 0) { - val goOn = try { - subscription.dispatch(buffer.read(subscription)) - true - } catch { - case _: SpecViolation => - unregisterSubscriptionInternal(subscription) - false - } + val goOn = + try { + subscription.dispatch(buffer.read(subscription)) + true + } catch { + case _: SpecViolation => + unregisterSubscriptionInternal(subscription) + false + } if (goOn) dispatchFromBufferAndReturnRemainingRequested(requested - 1, eos) else Long.MinValue } else if (eos ne NotReached) Long.MinValue diff --git a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala index df7da8bf37..9acca4d121 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala @@ -42,7 +42,7 @@ import scala.concurrent.duration.{ FiniteDuration, _ } // 100 ms is a realistic minimum between tokens, otherwise the maximumBurst is adjusted // to be able to support higher rates val effectiveMaximumBurst: 
Long = - if (maximumBurst == Throttle.AutomaticMaximumBurst) math.max(1, ((100 * 1000 * 1000) / nanosBetweenTokens)) + if (maximumBurst == Throttle.AutomaticMaximumBurst) math.max(1, (100 * 1000 * 1000) / nanosBetweenTokens) else maximumBurst require(!(mode == ThrottleMode.Enforcing && effectiveMaximumBurst < 0), "maximumBurst must be > 0 in Enforcing mode") diff --git a/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala b/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala index 1264231e51..0e70ac0ebf 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala @@ -200,8 +200,9 @@ import akka.annotation.InternalApi // Generate upstream requestMore for every Nth consumed input element final def pump(): Unit = { try while (transferState.isExecutable) { - currentAction() - } catch { case NonFatal(e) => pumpFailed(e) } + currentAction() + } + catch { case NonFatal(e) => pumpFailed(e) } if (isPumpFinished) pumpFinished() } diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala index 151d380ede..c185f8f09b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala @@ -357,14 +357,15 @@ import akka.util.OptionVal private def reportSubscribeFailure(subscriber: Subscriber[Any]): Unit = try shutdownReason match { - case OptionVal.Some(_: SpecViolation) => // ok, not allowed to call onError - case OptionVal.Some(e) => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnError(subscriber, e) - case _ => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnComplete(subscriber) - } catch { + case OptionVal.Some(_: SpecViolation) => // ok, not allowed to call onError + case OptionVal.Some(e) => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnError(subscriber, e) + case _ => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnComplete(subscriber) + } + catch { case _: SpecViolation => // nothing to do } @@ -558,13 +559,14 @@ import akka.util.OptionVal private var enqueueToShortCircuit: (Any) => Unit = _ lazy val interpreter: GraphInterpreter = - new GraphInterpreter(mat, log, logics, connections, (logic, event, promise, handler) => { - val asyncInput = AsyncInput(this, logic, event, promise, handler) - val currentInterpreter = GraphInterpreter.currentInterpreterOrNull - if (currentInterpreter == null || (currentInterpreter.context ne self)) - self ! asyncInput - else enqueueToShortCircuit(asyncInput) - }, attributes.mandatoryAttribute[ActorAttributes.FuzzingMode].enabled, self) + new GraphInterpreter(mat, log, logics, connections, + (logic, event, promise, handler) => { + val asyncInput = AsyncInput(this, logic, event, promise, handler) + val currentInterpreter = GraphInterpreter.currentInterpreterOrNull + if (currentInterpreter == null || (currentInterpreter.context ne self)) + self ! asyncInput + else enqueueToShortCircuit(asyncInput) + }, attributes.mandatoryAttribute[ActorAttributes.FuzzingMode].enabled, self) // TODO: really needed? private var subscribesPending = 0 @@ -654,9 +656,10 @@ import akka.util.OptionVal else { waitingForShutdown = true val subscriptionTimeout = attributes.mandatoryAttribute[ActorAttributes.StreamSubscriptionTimeout].timeout - mat.scheduleOnce(subscriptionTimeout, new Runnable { - override def run(): Unit = self ! 
Abort(GraphInterpreterShell.this) - }) + mat.scheduleOnce(subscriptionTimeout, + new Runnable { + override def run(): Unit = self ! Abort(GraphInterpreterShell.this) + }) } } else if (interpreter.isSuspended && !resumeScheduled) sendResume(!usingShellLimit) @@ -738,10 +741,10 @@ import akka.util.OptionVal false } - //this limits number of messages that can be processed synchronously during one actor receive. + // this limits number of messages that can be processed synchronously during one actor receive. private val eventLimit: Int = _initial.attributes.mandatoryAttribute[ActorAttributes.SyncProcessingLimit].limit private var currentLimit: Int = eventLimit - //this is a var in order to save the allocation when no short-circuiting actually happens + // this is a var in order to save the allocation when no short-circuiting actually happens private var shortCircuitBuffer: util.ArrayDeque[Any] = null def enqueueToShortCircuit(input: Any): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala index 7e7dfa8482..7c5d76070c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala @@ -43,7 +43,7 @@ import akka.util.OptionVal setHandlers(in, out, this) override def postStop(): Unit = { - //this covers the case when the nested flow was never materialized + // this covers the case when the nested flow was never materialized if (!matPromise.isCompleted) { matPromise.failure(new AbruptStageTerminationException(this)) } @@ -58,7 +58,7 @@ import akka.util.OptionVal if (accumulated.size == n) { materializeFlow() } else { - //gi'me some more! + // gi'me some more! 
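// Editor's sketch of the operator this FlatMapPrefix stage backs (scaladsl API, for
// illustration): the first n elements are accumulated exactly as above, then handed
// to the user function to build the nested flow for the remaining elements:
//
//   import akka.stream.scaladsl.{ Flow, Sink, Source }
//   Source(1 to 5)
//     .flatMapPrefix(2)(prefix => Flow[Int].map(_ * prefix.sum)) // prefix = Seq(1, 2)
//     .runWith(Sink.seq) // 3, 4, 5 pass through the nested flow => Seq(9, 12, 15)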
pull(in) } } @@ -75,7 +75,7 @@ import akka.util.OptionVal subSource match { case OptionVal.Some(s) => s.fail(ex) case _ => - //flow won't be materialized, so we have to complete the future with a failure indicating this + // flow won't be materialized, so we have to complete the future with a failure indicating this matPromise.failure(new NeverMaterializedException(ex)) super.onUpstreamFailure(ex) } @@ -84,12 +84,12 @@ import akka.util.OptionVal override def onPull(): Unit = { subSink match { case OptionVal.Some(s) => - //delegate to subSink + // delegate to subSink s.pull() case _ => if (accumulated.size < n) pull(in) else if (accumulated.size == n) { - //corner case for n = 0, can be handled in FlowOps + // corner case for n = 0, can be handled in FlowOps materializeFlow() } else { throw new IllegalStateException(s"Unexpected accumulated size: ${accumulated.size} (n: $n)") @@ -104,9 +104,9 @@ import akka.util.OptionVal if (propagateToNestedMaterialization) { downstreamCause = OptionVal.Some(cause) if (accumulated.size == n) { - //corner case for n = 0, can be handled in FlowOps + // corner case for n = 0, can be handled in FlowOps materializeFlow() - } else if (!hasBeenPulled(in)) { //if in was already closed, nested flow would have already been materialized + } else if (!hasBeenPulled(in)) { // if in was already closed, nested flow would have already been materialized pull(in) } } else { @@ -153,31 +153,32 @@ import akka.util.OptionVal } } } - val matVal = try { - val flow = f(prefix) - val runnableGraph = Source.fromGraph(theSubSource.source).viaMat(flow)(Keep.right).to(theSubSink.sink) - interpreter.subFusingMaterializer.materialize(runnableGraph, inheritedAttributes) - } catch { - case NonFatal(ex) => - matPromise.failure(new NeverMaterializedException(ex)) - subSource = OptionVal.None - subSink = OptionVal.None - throw ex - } + val matVal = + try { + val flow = f(prefix) + val runnableGraph = Source.fromGraph(theSubSource.source).viaMat(flow)(Keep.right).to(theSubSink.sink) + interpreter.subFusingMaterializer.materialize(runnableGraph, inheritedAttributes) + } catch { + case NonFatal(ex) => + matPromise.failure(new NeverMaterializedException(ex)) + subSource = OptionVal.None + subSink = OptionVal.None + throw ex + } matPromise.success(matVal) - //in case downstream was closed + // in case downstream was closed downstreamCause match { case OptionVal.Some(ex) => theSubSink.cancel(ex) case _ => } - //in case we've materialized due to upstream completion + // in case we've materialized due to upstream completion if (isClosed(in)) { theSubSource.complete() } - //in case we've been pulled by downstream + // in case we've been pulled by downstream if (isAvailable(out)) { theSubSink.pull() } diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala index 85347c2b44..c467ad4a2d 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala @@ -36,7 +36,7 @@ import scala.util.{ Failure, Success, Try } val innerMatValue = Promise[M]() val logic = new GraphStageLogic(shape) { - //seems like we must set handlers BEFORE preStart + // seems like we must set handlers BEFORE preStart setHandlers(in, out, Initializing) override def preStart(): Unit = { @@ -46,7 +46,7 @@ import scala.util.{ Failure, Success, Try } case None => val cb = getAsyncCallback(Initializing.onFuture) 
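// Editor's sketch for the FutureFlow stage reformatted here: Flow.futureFlow defers
// materialization until the future completes and surfaces the inner materialized
// value as a Future (assuming the scaladsl.Flow.futureFlow signature):
//
//   import scala.concurrent.Future
//   import akka.stream.scaladsl.Flow
//   val deferred: Flow[Int, String, Future[akka.NotUsed]] =
//     Flow.futureFlow(Future.successful(Flow[Int].map(_.toString)))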
futureFlow.onComplete(cb.invoke)(ExecutionContexts.parasitic) - //in case both ports are closed before future completion + // in case both ports are closed before future completion setKeepGoing(true) } } @@ -66,10 +66,10 @@ import scala.util.{ Failure, Success, Try } upstreamFailure = OptionVal.Some(ex) } - //will later be propagated to the materialized flow (by examining isClosed(in)) + // will later be propagated to the materialized flow (by examining isClosed(in)) override def onUpstreamFinish(): Unit = {} - //will later be propagated to the materialized flow (by examining isAvailable(out)) + // will later be propagated to the materialized flow (by examining isAvailable(out)) override def onPull(): Unit = {} var downstreamCause = OptionVal.none[Throwable] @@ -88,7 +88,7 @@ import scala.util.{ Failure, Success, Try } innerMatValue.failure(new NeverMaterializedException(exception)) failStage(exception) case Success(flow) => - //materialize flow, connect inlet and outlet, feed with potential events and set handlers + // materialize flow, connect inlet and outlet, feed with potential events and set handlers connect(flow) setKeepGoing(false) } @@ -123,13 +123,14 @@ import scala.util.{ Failure, Success, Try } case OptionVal.Some(cause) => subSink.cancel(cause) case _ => if (isAvailable(out)) subSink.pull() } - setHandlers(in, out, new InHandler with OutHandler { - override def onPull(): Unit = subSink.pull() - override def onDownstreamFinish(cause: Throwable): Unit = subSink.cancel(cause) - override def onPush(): Unit = subSource.push(grab(in)) - override def onUpstreamFinish(): Unit = subSource.complete() - override def onUpstreamFailure(ex: Throwable): Unit = subSource.fail(ex) - }) + setHandlers(in, out, + new InHandler with OutHandler { + override def onPull(): Unit = subSink.pull() + override def onDownstreamFinish(cause: Throwable): Unit = subSink.cancel(cause) + override def onPush(): Unit = subSource.push(grab(in)) + override def onUpstreamFinish(): Unit = subSource.complete() + override def onUpstreamFailure(ex: Throwable): Unit = subSource.fail(ex) + }) } catch { case NonFatal(ex) => innerMatValue.failure(new NeverMaterializedException(ex)) diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala index 7f9fad6f81..dbfa43c019 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala @@ -43,11 +43,11 @@ import akka.stream.stage._ final val PullStartFlip = 3 // 0011 final val PullEndFlip = 10 // 1010 - final val PushStartFlip = 12 //1100 - final val PushEndFlip = 5 //0101 + final val PushStartFlip = 12 // 1100 + final val PushEndFlip = 5 // 0101 final val KeepGoingFlag = 0x4000000 - final val KeepGoingMask = 0x3ffffff + final val KeepGoingMask = 0x3FFFFFF /** * Marker object that indicates that a port holds no element since it was already grabbed. 
The port is still pullable, @@ -504,7 +504,8 @@ import akka.stream.stage._ activeStage = connection.outOwner if (Debug) println( - s"$Name CANCEL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) [${outLogicName(connection)}]") + s"$Name CANCEL ${inOwnerName(connection)} -> ${outOwnerName( + connection)} (${connection.outHandler}) [${outLogicName(connection)}]") connection.portState |= OutClosed completeConnection(connection.outOwner.stageId) val cause = connection.slot.asInstanceOf[Cancelled].cause @@ -517,7 +518,8 @@ import akka.stream.stage._ // Normal completion (no push pending) if (Debug) println( - s"$Name COMPLETE ${outOwnerName(connection)} -> ${inOwnerName(connection)} (${connection.inHandler}) [${inLogicName(connection)}]") + s"$Name COMPLETE ${outOwnerName(connection)} -> ${inOwnerName( + connection)} (${connection.inHandler}) [${inLogicName(connection)}]") connection.portState |= InClosed activeStage = connection.inOwner completeConnection(connection.inOwner.stageId) @@ -536,7 +538,8 @@ import akka.stream.stage._ private def processPush(connection: Connection): Unit = { if (Debug) println( - s"$Name PUSH ${outOwnerName(connection)} -> ${inOwnerName(connection)}, ${connection.slot} (${connection.inHandler}) [${inLogicName(connection)}]") + s"$Name PUSH ${outOwnerName(connection)} -> ${inOwnerName( + connection)}, ${connection.slot} (${connection.inHandler}) [${inLogicName(connection)}]") activeStage = connection.inOwner connection.portState ^= PushEndFlip connection.inHandler.onPush() @@ -546,7 +549,8 @@ import akka.stream.stage._ private def processPull(connection: Connection): Unit = { if (Debug) println( - s"$Name PULL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) [${outLogicName(connection)}]") + s"$Name PULL ${inOwnerName(connection)} -> ${outOwnerName( + connection)} (${connection.outHandler}) [${outLogicName(connection)}]") activeStage = connection.outOwner connection.portState ^= PullEndFlip connection.outHandler.onPull() diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala index 1e8d32578f..41f6e7e42a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala @@ -126,36 +126,36 @@ import akka.stream.stage._ val finishPromise = Promise[Done]() (new GraphStageLogic(shape) with InHandler with OutHandler { - def onPush(): Unit = push(out, grab(in)) + def onPush(): Unit = push(out, grab(in)) - override def onUpstreamFinish(): Unit = { - finishPromise.success(Done) - completeStage() - } - - override def onUpstreamFailure(ex: Throwable): Unit = { - finishPromise.failure(ex) - failStage(ex) - } - - def onPull(): Unit = pull(in) - - override def onDownstreamFinish(cause: Throwable): Unit = { - cause match { - case _: SubscriptionWithCancelException.NonFailureCancellation => - finishPromise.success(Done) - case ex => - finishPromise.failure(ex) + override def onUpstreamFinish(): Unit = { + finishPromise.success(Done) + completeStage() } - cancelStage(cause) - } - override def postStop(): Unit = { - if (!finishPromise.isCompleted) finishPromise.failure(new AbruptStageTerminationException(this)) - } + override def onUpstreamFailure(ex: Throwable): Unit = { + finishPromise.failure(ex) + failStage(ex) + } - setHandlers(in, out, this) - }, finishPromise.future) + def onPull(): Unit = pull(in) + + override def 
onDownstreamFinish(cause: Throwable): Unit = { + cause match { + case _: SubscriptionWithCancelException.NonFailureCancellation => + finishPromise.success(Done) + case ex => + finishPromise.failure(ex) + } + cancelStage(cause) + } + + override def postStop(): Unit = { + if (!finishPromise.isCompleted) finishPromise.failure(new AbruptStageTerminationException(this)) + } + + setHandlers(in, out, this) + }, finishPromise.future) } override def toString = "TerminationWatcher" diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala index 296ed44a23..06e95601de 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala @@ -26,7 +26,7 @@ import akka.stream.Attributes.{ InputBuffer, LogLevels } import akka.stream.Attributes.SourceLocation import akka.stream.OverflowStrategies._ import akka.stream.Supervision.Decider -import akka.stream.impl.{ ContextPropagation, ReactiveStreamsCompliance, Buffer => BufferImpl } +import akka.stream.impl.{ Buffer => BufferImpl, ContextPropagation, ReactiveStreamsCompliance } import akka.stream.impl.Stages.DefaultAttributes import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage import akka.stream.scaladsl.{ DelayStrategy, Source } @@ -93,7 +93,8 @@ import akka.util.ccompat._ } else { buffer = OptionVal.Some(elem) contextPropagation.suspendContext() - } else pull(in) + } + else pull(in) } catch { case NonFatal(ex) => decider(ex) match { @@ -254,7 +255,7 @@ private[stream] object Collect { case result: Out @unchecked => push(out, result) case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser } - case None => //do nothing + case None => // do nothing } override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) @@ -297,17 +298,18 @@ private[stream] object Collect { override def onUpstreamFailure(ex: Throwable): Unit = try pf.applyOrElse(ex, NotApplied) match { - case NotApplied => failStage(ex) - case result: T @unchecked => { - if (isAvailable(out)) { - push(out, result) - completeStage() - } else { - recovered = Some(result) + case NotApplied => failStage(ex) + case result: T @unchecked => { + if (isAvailable(out)) { + push(out, result) + completeStage() + } else { + recovered = Some(result) + } } + case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser } - case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser - } catch { + catch { case NonFatal(ex) => failStage(ex) } @@ -408,7 +410,6 @@ private[stream] object Collect { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { self => - private var aggregator = zero private lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider @@ -416,12 +417,13 @@ private[stream] object Collect { import shape.{ in, out } // Initial behavior makes sure that the zero gets flushed if upstream is empty - setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, aggregator) - setHandlers(in, out, self) - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + push(out, aggregator) + setHandlers(in, out, self) + } + }) setHandler( in, @@ -429,12 +431,13 @@ private[stream] object Collect { override def onPush(): Unit = () override def onUpstreamFinish(): Unit = - setHandler(out, new 
OutHandler { - override def onPull(): Unit = { - push(out, aggregator) - completeStage() - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + push(out, aggregator) + completeStage() + } + }) }) override def onPull(): Unit = pull(in) @@ -475,7 +478,6 @@ private[stream] object Collect { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { self => - private var current: Out = zero private var elementHandled: Boolean = false @@ -492,12 +494,13 @@ private[stream] object Collect { } override def onUpstreamFinish(): Unit = - setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, current) - completeStage() - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + push(out, current) + completeStage() + } + }) } private def onRestart(): Unit = { @@ -847,7 +850,7 @@ private[stream] object Collect { case Some(weight) => left -= weight if (left >= 0) push(out, elem) else failStage(new StreamLimitReachedException(n)) - case None => //do nothing + case None => // do nothing } } @@ -2056,9 +2059,10 @@ private[akka] object TakeWithin { } else { push(out, grab(in)) // change the in handler to avoid System.nanoTime call after timeout - setHandler(in, new InHandler { - def onPush() = push(out, grab(in)) - }) + setHandler(in, + new InHandler { + def onPush() = push(out, grab(in)) + }) } } @@ -2089,16 +2093,17 @@ private[akka] object TakeWithin { def setInitialInHandler(): Unit = { // Initial input handler - setHandler(in, new InHandler { - override def onPush(): Unit = { - aggregator = grab(in) - pull(in) - setHandler(in, self) - } + setHandler(in, + new InHandler { + override def onPush(): Unit = { + aggregator = grab(in) + pull(in) + setHandler(in, self) + } - override def onUpstreamFinish(): Unit = - failStage(new NoSuchElementException("reduce over empty stream")) - }) + override def onUpstreamFinish(): Unit = + failStage(new NoSuchElementException("reduce over empty stream")) + }) } @nowarn // compiler complaining about aggregator = _: T @@ -2150,15 +2155,17 @@ private[akka] object TakeWithin { override def createLogic(attr: Attributes) = new GraphStageLogic(shape) { var attempt = 0 - setHandler(in, new InHandler { - override def onPush(): Unit = push(out, grab(in)) + setHandler(in, + new InHandler { + override def onPush(): Unit = push(out, grab(in)) - override def onUpstreamFailure(ex: Throwable) = onFailure(ex) - }) + override def onUpstreamFailure(ex: Throwable) = onFailure(ex) + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) def onFailure(ex: Throwable) = if ((maximumRetries < 0 || attempt < maximumRetries) && pf.isDefinedAt(ex)) { diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala index 0a97585821..bd5a0d14cd 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala @@ -74,12 +74,13 @@ import akka.util.ccompat.JavaConverters._ override def onUpstreamFinish(): Unit = if (activeSources == 0) completeStage() override def onPull(): Unit = { pull(in) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - // could be unavailable due to async input having been executed before this notification - if 
(queue.nonEmpty && isAvailable(out)) pushOut() - } - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + // could be unavailable due to async input having been executed before this notification + if (queue.nonEmpty && isAvailable(out)) pushOut() + } + }) } setHandlers(in, out, this) @@ -267,7 +268,6 @@ import akka.util.ccompat.JavaConverters._ override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with OutHandler with InHandler { parent => - lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider private val activeSubstreamsMap = new java.util.HashMap[Any, SubstreamSource]() private val closedSubstreams = @@ -505,7 +505,7 @@ import akka.util.ccompat.JavaConverters._ new OutHandler { override def onPull(): Unit = { if (substreamSource eq null) { - //can be already pulled from substream in case split after + // can be already pulled from substream in case split after if (!hasBeenPulled(in)) pull(in) } else if (substreamWaitingToBePushed) pushSubstreamSource() } @@ -685,7 +685,7 @@ import akka.util.ccompat.JavaConverters._ override def initialAttributes = Attributes.name(s"SubSink($name)") override val shape = SinkShape(in) - private val status = new AtomicReference[ /* State */ AnyRef](Uninitialized) + private val status = new AtomicReference[/* State */ AnyRef](Uninitialized) def pullSubstream(): Unit = dispatchCommand(RequestOneScheduledBeforeMaterialization) def cancelSubstream(): Unit = cancelSubstream(SubscriptionWithCancelException.NoMoreElementsNeeded) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala index 42b456c62b..4039ef8ee1 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala @@ -214,11 +214,11 @@ import akka.util.ByteString } else throw NeedMoreData def readShortLE(): Int = readByte() | (readByte() << 8) def readIntLE(): Int = readShortLE() | (readShortLE() << 16) - def readLongLE(): Long = (readIntLE() & 0XFFFFFFFFL) | ((readIntLE() & 0XFFFFFFFFL) << 32) + def readLongLE(): Long = (readIntLE() & 0xFFFFFFFFL) | ((readIntLE() & 0xFFFFFFFFL) << 32) def readShortBE(): Int = (readByte() << 8) | readByte() def readIntBE(): Int = (readShortBE() << 16) | readShortBE() - def readLongBE(): Long = ((readIntBE() & 0XFFFFFFFFL) << 32) | (readIntBE() & 0XFFFFFFFFL) + def readLongBE(): Long = ((readIntBE() & 0xFFFFFFFFL) << 32) | (readIntBE() & 0xFFFFFFFFL) def skip(numBytes: Int): Unit = if (off + numBytes <= input.length) off += numBytes diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala index 967775d93e..34cdfac0f5 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala @@ -82,7 +82,7 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: override def onPull(): Unit = { if (availableChunks.size < maxReadAhead && !eofEncountered) availableChunks = readAhead(maxReadAhead, availableChunks) - //if already read something and try + // if already read something and try if (availableChunks.nonEmpty) { emitMultiple(out, availableChunks.iterator, () => if (eofEncountered) success() else setHandler(out, handler)) availableChunks = Vector.empty[ByteString] @@ -97,13 +97,14 @@ 
private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: /** BLOCKING I/O READ */ @tailrec def readAhead(maxChunks: Int, chunks: Vector[ByteString]): Vector[ByteString] = if (chunks.size < maxChunks && !eofEncountered) { - val readBytes = try channel.read(buffer, position) - catch { - case NonFatal(ex) => - failStage(ex) - ioResultPromise.trySuccess(IOResult(position, Failure(ex))) - throw ex - } + val readBytes = + try channel.read(buffer, position) + catch { + case NonFatal(ex) => + failStage(ex) + ioResultPromise.trySuccess(IOResult(position, Failure(ex))) + throw ex + } if (readBytes > 0) { buffer.flip() diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala index 06268dc72a..332dcbd885 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala @@ -75,7 +75,7 @@ private[stream] object InputStreamSinkStage { } def onPush(): Unit = { - //1 is buffer for Finished or Failed callback + // 1 is buffer for Finished or Failed callback require(dataQueue.remainingCapacity() > 1) val bs = grab(in) if (bs.nonEmpty) { @@ -135,7 +135,7 @@ private[stream] object InputStreamSinkStage { override def read(): Int = { val a = new Array[Byte](1) read(a, 0, 1) match { - case 1 => a(0) & 0xff + case 1 => a(0) & 0xFF case -1 => -1 case len => throw new IllegalStateException(s"Invalid length [$len]") } diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala index 81b4a0f0e4..a2b969a2d1 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala @@ -368,7 +368,7 @@ import akka.util.ByteString if (tracing) log.debug( s"wrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${userInBuffer.remaining} out=${transportOutBuffer - .position()}") + .position()}") if (lastHandshakeStatus == FINISHED) handshakeFinished() runDelegatedTasks() @@ -402,7 +402,7 @@ import akka.util.ByteString if (tracing) log.debug( s"unwrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${transportInBuffer.remaining} out=${userOutBuffer - .position()}") + .position()}") runDelegatedTasks() result.getStatus match { case OK => diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala index f9cb85ebd0..17406e1eb9 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala @@ -23,7 +23,7 @@ import akka.io.Tcp._ import akka.stream._ import akka.stream.impl.ReactiveStreamsCompliance import akka.stream.impl.fusing.GraphStages.detacher -import akka.stream.scaladsl.{ BidiFlow, Flow, TcpIdleTimeoutException, Tcp => StreamTcp } +import akka.stream.scaladsl.{ BidiFlow, Flow, Tcp => StreamTcp, TcpIdleTimeoutException } import akka.stream.scaladsl.Tcp.{ OutgoingConnection, ServerBinding } import akka.stream.scaladsl.TcpAttributes import akka.stream.stage._ @@ -40,7 +40,8 @@ import akka.util.ByteString val halfClose: Boolean, val idleTimeout: Duration, val bindShutdownTimeout: FiniteDuration) - extends GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[StreamTcp.ServerBinding]] { + extends 
GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[ + StreamTcp.ServerBinding]] { import ConnectionSourceStage._ val out: Outlet[StreamTcp.IncomingConnection] = Outlet("IncomingConnections.out") @@ -77,16 +78,17 @@ import akka.util.ByteString stageActor.watch(listener) if (isAvailable(out)) listener ! ResumeAccepting(1) val thisStage = self - bindingPromise.success(ServerBinding(localAddress)(() => { - // To allow unbind() to be invoked multiple times with minimal chance of dead letters, we check if - // it's already unbound before sending the message. - if (!unbindPromise.isCompleted) { - // Beware, sender must be explicit since stageActor.ref will be invalid to access after the stage - // stopped. - thisStage.tell(Unbind, thisStage) - } - unbindPromise.future - }, unbindPromise.future.map(_ => Done)(ExecutionContexts.parasitic))) + bindingPromise.success(ServerBinding(localAddress)( + () => { + // To allow unbind() to be invoked multiple times with minimal chance of dead letters, we check if + // it's already unbound before sending the message. + if (!unbindPromise.isCompleted) { + // Beware, sender must be explicit since stageActor.ref will be invalid to access after the stage + // stopped. + thisStage.tell(Unbind, thisStage) + } + unbindPromise.future + }, unbindPromise.future.map(_ => Done)(ExecutionContexts.parasitic))) case f: CommandFailed => val ex = new BindFailedException { // cannot modify the actual exception class for compatibility reasons diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala index 0bd1f5cbf9..b03b128330 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala @@ -82,5 +82,5 @@ import akka.util.ByteString 0, // MTIME 4 0, // XFL 0 // OS - ) + ) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/package.scala b/akka-stream/src/main/scala/akka/stream/impl/package.scala index 249aaca27d..f735bb78cb 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/package.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/package.scala @@ -458,6 +458,5 @@ package akka.stream * * [[akka.stream.impl.TraversalBuilder.printTraversal]]: Prints the Traversal in a readable format * * [[akka.stream.impl.TraversalBuilder.printWiring]]: Prints the calculated port assignments. Useful for * debugging if everything is wired to the right thing. 
- * */ package object impl {} diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala index b4fe97030c..c8f23095d8 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala @@ -46,7 +46,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlowsMat[I1, O1, I2, O2, M1, M2, M]( flow1: Graph[FlowShape[I1, O1], M1], @@ -71,7 +70,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlows[I1, O1, I2, O2, M1, M2]( flow1: Graph[FlowShape[I1, O1], M1], diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala index 0210d91739..c7274dad75 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala @@ -387,7 +387,8 @@ object Flow { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * * */ + * * + */ def flattenOptional[Out, In <: Optional[Out]](): Flow[In, Out, NotUsed] = new Flow(scaladsl.Flow[In].collect { case optional: Optional[Out @unchecked] if optional.isPresent => optional.get() @@ -679,7 +680,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: function.Procedure[Out]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.wireTap(f(_))) @@ -936,7 +936,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.filter(p.test)) @@ -1676,7 +1675,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.mapError(pf)) @@ -1698,7 +1696,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable](clazz: Class[E], f: function.Function[E, Throwable]): javadsl.Flow[In, Out, Mat] = mapError { @@ -1723,7 +1720,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ @nowarn("msg=deprecated") def recoverWith(pf: PartialFunction[Throwable, _ <: Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = @@ -1819,9 +1815,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr attempts: Int, clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = - recoverWithRetries(attempts, { - case elem if clazz.isInstance(elem) => supplier.get() - }) + recoverWithRetries(attempts, + { + case elem if clazz.isInstance(elem) => supplier.get() + }) /** * Terminate processing (and cancel the upstream publisher) after the given @@ 
-1917,7 +1914,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @param seed Provides the first state for a conflated value using the first unconsumed element as a start * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflateWithSeed[S]( seed: function.Function[Out, S], @@ -1948,7 +1944,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * see also [[Flow.conflateWithSeed]] [[Flow.batch]] [[Flow.batchWeighted]] * * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflate(aggregate: function.Function2[Out, Out, Out]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.conflate(aggregate.apply)) @@ -2172,7 +2167,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. - **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): javadsl.Flow[In, Out2, Mat] = { @@ -2916,7 +2911,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new Flow(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -2997,7 +2993,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new javadsl.Flow(delegate.mergeAll(seq, eagerComplete)) } @@ -3152,13 +3149,14 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = this.viaMat( Flow.fromGraph( - GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { - def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { - val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T]) - b.from(s).toInlet(zip.in1) - FlowShape(zip.in0, zip.out) - } - })), + GraphDSL.create(that, + new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { + def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { + val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T]) + b.from(s).toInlet(zip.in1) + FlowShape(zip.in0, zip.out) + } + })), matF) /** @@ -3224,13 +3222,14 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = this.viaMat( Flow.fromGraph( - GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { - def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { - val zip: FanInShape2[Out, T, Out Pair T] = b.add(ZipLatest.create[Out, T]) - b.from(s).toInlet(zip.in1) - FlowShape(zip.in0, zip.out) - } - })), + GraphDSL.create(that, + new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair 
T]] { + def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { + val zip: FanInShape2[Out, T, Out Pair T] = b.add(ZipLatest.create[Out, T]) + b.from(s).toInlet(zip.in1) + FlowShape(zip.in0, zip.out) + } + })), matF) /** @@ -3565,7 +3564,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttle(elements, per.asScala)) @@ -3604,7 +3602,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -3645,7 +3642,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -3691,7 +3687,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -3733,7 +3728,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -3778,7 +3772,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -4188,7 +4181,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @param collapseContext turn each incoming pair of element and context value into an element of this Flow * @param extractContext turn each outgoing element of this Flow into an outgoing context value - * */ def asFlowWithContext[U, CtxU, CtxOut]( collapseContext: function.Function2[U, CtxU, In], diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala index 47d1b92d84..db3e877d32 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala @@ -38,7 +38,6 @@ object FlowWithContext { * operations. * * An "empty" flow can be created by calling `FlowWithContext[Ctx, T]`. 
- * */ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat]( delegate: javadsl.Flow[Pair[In, CtxIn], Pair[Out, CtxOut], Mat]) @@ -151,11 +150,9 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat]( * @see [[akka.stream.javadsl.Flow.grouped]] */ def grouped(n: Int): FlowWithContext[ - In, - CtxIn, + In, CtxIn, java.util.List[Out @uncheckedVariance], - java.util.List[CtxOut @uncheckedVariance], - Mat] = + java.util.List[CtxOut @uncheckedVariance], Mat] = viaScala(_.grouped(n).map(_.asJava).mapContext(_.asJava)) /** @@ -219,11 +216,9 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat]( * @see [[akka.stream.javadsl.Flow.sliding]] */ def sliding(n: Int, step: Int = 1): FlowWithContext[ - In, - CtxIn, + In, CtxIn, java.util.List[Out @uncheckedVariance], - java.util.List[CtxOut @uncheckedVariance], - Mat] = + java.util.List[CtxOut @uncheckedVariance], Mat] = viaScala(_.sliding(n, step).map(_.asJava).mapContext(_.asJava)) /** @@ -358,11 +353,7 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat]( private[this] def viaScala[In2, CtxIn2, Out2, CtxOut2, Mat2]( f: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] => scaladsl.FlowWithContext[ - In2, - CtxIn2, - Out2, - CtxOut2, - Mat2]): FlowWithContext[In2, CtxIn2, Out2, CtxOut2, Mat2] = + In2, CtxIn2, Out2, CtxOut2, Mat2]): FlowWithContext[In2, CtxIn2, Out2, CtxOut2, Mat2] = f(this.asScala).asJava } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala index 3c3dd11abe..50e8f1324f 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala @@ -111,7 +111,6 @@ object Framing { * For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`. * Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`. * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise. 
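For reference, the computeFrameSize variant documented above composes like this (a minimal sketch shown with the scaladsl API for brevity; the two-byte-magic protocol and all constants are invented for the example):

import java.nio.ByteOrder

import akka.NotUsed
import akka.stream.scaladsl.{ Flow, Framing }
import akka.util.ByteString

// Frames look like [2 magic bytes][4-byte big-endian body length][body], so
// the actual frame size is the decoded body length plus the 6 header bytes.
val framing: Flow[ByteString, ByteString, NotUsed] =
  Framing.lengthField(
    4, // fieldLength: the size field is 4 bytes wide
    2, // fieldOffset: it starts after the 2 magic bytes
    1024, // maximumFrameLength
    ByteOrder.BIG_ENDIAN,
    (_, size) => size + 6) // computeFrameSize: decoded body size plus header bytes

Note that the computed size here is always at least fieldOffset + fieldLength (6), satisfying the constraint stated above.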
- * */ def lengthField( fieldLength: Int, diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala index 6235b0d549..677e249721 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala @@ -622,11 +622,11 @@ object GraphDSL extends GraphCreate { val sListH = gbuilder.delegate.add(graphs.get(0), toList) val sListT = graphs.subList(1, graphs.size()).asScala.map(g => gbuilder.delegate.add(g, combine)).asJava val s = buildBlock(gbuilder, { - val newList = new util.ArrayList[IS] - newList.add(sListH) - newList.addAll(sListT) - newList - }) + val newList = new util.ArrayList[IS] + newList.add(sListH) + newList.addAll(sListT) + newList + }) new GenericGraph(s, gbuilder.delegate.result(s)) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala b/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala index c51970416b..8057f8b7e8 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala @@ -19,7 +19,6 @@ import akka.util.ccompat.JavaConverters._ * '''Completes when''' all upstreams complete (eagerClose=false) or one upstream completes (eagerClose=true) * * '''Cancels when''' downstream cancels - * */ object MergeLatest { diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala index 1ad7340f04..762dd363ba 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala @@ -192,7 +192,6 @@ object RestartSource { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -223,7 +222,6 @@ object RestartSource { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -256,7 +254,6 @@ object RestartSource { * @param maxRestarts the amount of restarts is capped to this amount within a time frame of minBackoff. * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -290,7 +287,6 @@ object RestartSource { * @param maxRestarts the amount of restarts is capped to this amount within a time frame of minBackoff. * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -317,7 +313,6 @@ object RestartSource { * * @param settings [[RestartSettings]] defining restart configuration * @param sourceFactory A factory for producing the [[Source]] to wrap. 
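The settings-based restart-on-failure variant documented above reads like this (a minimal scaladsl sketch; the tick source is a stand-in for a source that may fail, and all durations are placeholders):

import scala.concurrent.duration._

import akka.stream.RestartSettings
import akka.stream.scaladsl.{ RestartSource, Source }

// Restart only on failure, with exponential backoff, 20% jitter, and at most
// 10 restarts counted within a 5 minute window.
val settings = RestartSettings(minBackoff = 1.second, maxBackoff = 30.seconds, randomFactor = 0.2)
  .withMaxRestarts(10, 5.minutes)

val restarting = RestartSource.onFailuresWithBackoff(settings) { () =>
  Source.tick(Duration.Zero, 1.second, "poll") // stand-in for a failing upstream
}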
- * */ def onFailuresWithBackoff[T](settings: RestartSettings, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = akka.stream.scaladsl.RestartSource diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala index 51577809e3..7e0f5917cd 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala @@ -103,7 +103,7 @@ object Sink { /** * A [[Sink]] that will always backpressure never cancel and never consume any elements from the stream. - * */ + */ def never[T]: Sink[T, CompletionStage[Done]] = new Sink(scaladsl.Sink.never.toCompletionStage()) @@ -254,7 +254,6 @@ object Sink { * of the actor will grow. For potentially slow consumer actors it is recommended * to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate * limiting operator in front of this `Sink`. - * */ def actorRef[In](ref: ActorRef, onCompleteMessage: Any): Sink[In, NotUsed] = new Sink(scaladsl.Sink.actorRef[In](ref, onCompleteMessage, (t: Throwable) => Status.Failure(t))) @@ -482,7 +481,7 @@ object Sink { */ def lazyCompletionStageSink[T, M](create: Creator[CompletionStage[Sink[T, M]]]): Sink[T, CompletionStage[M]] = new Sink(scaladsl.Sink.lazyFutureSink { () => - create.create().toScala.map(_.asScala)((ExecutionContexts.parasitic)) + create.create().toScala.map(_.asScala)(ExecutionContexts.parasitic) }).mapMaterializedValue(_.toJava) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala index 1915da5f45..6fb9755c73 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala @@ -454,18 +454,19 @@ object Source { bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = new Source(scaladsl.Source.actorRef(new JavaPartialFunction[Any, CompletionStrategy] { - override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { - val result = completionMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, new JavaPartialFunction[Any, Throwable] { - override def apply(x: Any, isCheck: Boolean): Throwable = { - val result = failureMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, bufferSize, overflowStrategy)) + override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { + val result = completionMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + }, + new JavaPartialFunction[Any, Throwable] { + override def apply(x: Any, isCheck: Boolean): Throwable = { + val result = failureMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + }, bufferSize, overflowStrategy)) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. 
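In scaladsl the matcher-based actorRef source above reads like this (a minimal sketch; the buffer size and overflow strategy are arbitrary choices for the example):

import akka.actor.Status
import akka.stream.{ CompletionStrategy, OverflowStrategy }
import akka.stream.scaladsl.{ Sink, Source }

// The two partial functions decide which incoming messages complete or fail
// the stream; every other message is emitted as an element.
val source = Source.actorRef[String](
  completionMatcher = { case Status.Success(_) => CompletionStrategy.draining },
  failureMatcher = { case Status.Failure(cause) => cause },
  bufferSize = 64,
  overflowStrategy = OverflowStrategy.dropHead)

// With an implicit ActorSystem in scope:
//   val ref = source.to(Sink.foreach(println)).run()
//   ref ! "hello"               // emitted downstream
//   ref ! Status.Success("ok")  // completes after draining buffered elements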
@@ -513,10 +514,10 @@ object Source { @deprecated("Use variant accepting completion and failure matchers", "2.6.0") def actorRef[T](bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = new Source(scaladsl.Source.actorRef({ - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause }, bufferSize, overflowStrategy)) + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, { case akka.actor.Status.Failure(cause) => cause }, bufferSize, overflowStrategy)) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. @@ -536,19 +537,21 @@ object Source { ackMessage: Any, completionMatcher: akka.japi.function.Function[Any, java.util.Optional[CompletionStrategy]], failureMatcher: akka.japi.function.Function[Any, java.util.Optional[Throwable]]): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, new JavaPartialFunction[Any, CompletionStrategy] { - override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { - val result = completionMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, new JavaPartialFunction[Any, Throwable] { - override def apply(x: Any, isCheck: Boolean): Throwable = { - val result = failureMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - })) + new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, + new JavaPartialFunction[Any, CompletionStrategy] { + override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { + val result = completionMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + }, + new JavaPartialFunction[Any, Throwable] { + override def apply(x: Any, isCheck: Boolean): Throwable = { + val result = failureMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + })) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. 
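And the backpressured variant above, where the stage acknowledges every element to the sender (a minimal scaladsl sketch; Ack is a message type chosen for the example):

import akka.actor.Status
import akka.stream.CompletionStrategy
import akka.stream.scaladsl.Source

case object Ack

// The materialized ActorRef replies Ack to the sender of each element, so a
// producer that waits for Ack before sending again is flow-controlled by the
// stream instead of overflowing a buffer.
val backpressured = Source.actorRefWithBackpressure[String](
  ackMessage = Ack,
  completionMatcher = { case Status.Success(_) => CompletionStrategy.draining },
  failureMatcher = { case Status.Failure(cause) => cause })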
@@ -572,19 +575,21 @@ object Source { ackMessage: Any, completionMatcher: akka.japi.function.Function[Any, java.util.Optional[CompletionStrategy]], failureMatcher: akka.japi.function.Function[Any, java.util.Optional[Throwable]]): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, new JavaPartialFunction[Any, CompletionStrategy] { - override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { - val result = completionMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, new JavaPartialFunction[Any, Throwable] { - override def apply(x: Any, isCheck: Boolean): Throwable = { - val result = failureMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - })) + new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, + new JavaPartialFunction[Any, CompletionStrategy] { + override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { + val result = completionMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + }, + new JavaPartialFunction[Any, Throwable] { + override def apply(x: Any, isCheck: Boolean): Throwable = { + val result = failureMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + })) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. @@ -608,11 +613,12 @@ object Source { @Deprecated @deprecated("Use actorRefWithBackpressure accepting completion and failure matchers", "2.6.0") def actorRefWithAck[T](ackMessage: Any): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, { - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause })) + new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, + { + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, { case akka.actor.Status.Failure(cause) => cause })) /** * A graph with the shape of a source logically is a source, this method makes @@ -1519,7 +1525,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(that: Graph[SinkShape[Out], _]): javadsl.Source[Out, Mat] = new Source(delegate.wireTap(that)) @@ -1661,7 +1666,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new Source(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -1740,7 +1746,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new Source(delegate.mergeAll(seq, eagerComplete)) } @@ -2210,7 +2217,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * 
'''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): javadsl.Source[Out, Mat] = new Source(delegate.mapError(pf)) @@ -2232,7 +2238,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable](clazz: Class[E], f: function.Function[E, Throwable]): javadsl.Source[Out, Mat] = mapError { @@ -2318,7 +2323,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWithRetries( attempts: Int, @@ -2355,9 +2359,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ attempts: Int, clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = - recoverWithRetries(attempts, { - case elem if clazz.isInstance(elem) => supplier.get() - }: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]) + recoverWithRetries(attempts, + { + case elem if clazz.isInstance(elem) => supplier.get() + }: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]) /** * Transform each input element into an `Iterable` of output elements that is @@ -2610,7 +2615,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): javadsl.Source[Out, Mat] = new Source(delegate.filter(p.test)) @@ -3608,8 +3612,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Cancels when''' downstream cancels or substream cancels */ def prefixAndTail(n: Int): javadsl.Source[ - Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], - Mat] = + Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], Mat] = new Source(delegate.prefixAndTail(n).map { case (taken, tail) => Pair(taken.asJava, tail.asJava) }) /** @@ -3631,7 +3634,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. 
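The prefix-driven flow selection documented here looks like this in practice (a minimal sketch using the scaladsl API; the version-header protocol and the "v2" codec are invented for the example):

import akka.stream.scaladsl.{ Flow, Source }

// The first element is a header that selects how the rest is processed.
val decoded = Source(List("v2", "a", "b")).flatMapPrefix(1) {
  case Seq("v2") => Flow[String].map(_.toUpperCase) // hypothetical "v2" codec
  case _         => Flow[String] // unknown header: pass elements through unchanged
}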
- **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): javadsl.Source[Out2, Mat] = { @@ -4116,7 +4119,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): javadsl.Source[Out, Mat] = new Source(delegate.throttle(elements, per.asScala)) @@ -4155,7 +4157,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -4196,7 +4197,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -4235,7 +4235,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -4280,7 +4279,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -4329,7 +4327,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -4720,7 +4717,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ /** * Transform this source whose element is ``e`` into a source producing tuple ``(e, f(e))`` - **/ + */ def asSourceWithContext[Ctx](extractContext: function.Function[Out, Ctx]): SourceWithContext[Out, Ctx, Mat] = new scaladsl.SourceWithContext(this.asScala.map(x => (x, extractContext.apply(x)))).asJava diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala index b5b7b85228..0df13dc476 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala @@ -165,7 +165,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: function.Procedure[Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.wireTap(f(_))) @@ -347,7 +346,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.filter(p.test)) @@ -1036,7 +1034,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recover(pf: PartialFunction[Throwable, Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.recover(pf)) @@ -1059,7 +1056,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ @deprecated("Use recoverWithRetries instead.", "2.4.4") def recoverWith( @@ -1087,7 +1083,6 @@ class 
SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWithRetries( attempts: Int, @@ -1111,7 +1106,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): SubFlow[In, Out, Mat @uncheckedVariance] = new SubFlow(delegate.mapError(pf)) @@ -1133,7 +1127,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable]( clazz: Class[E], @@ -1230,7 +1223,6 @@ class SubFlow[In, Out, Mat]( * * @param seed Provides the first state for a conflated value using the first unconsumed element as a start * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflateWithSeed[S]( seed: function.Function[Out, S], @@ -1261,7 +1253,6 @@ class SubFlow[In, Out, Mat]( * see also [[SubFlow.conflateWithSeed]] [[SubFlow.batch]] [[SubFlow.batchWeighted]] * * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflate(aggregate: function.Function2[Out, Out, Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.conflate(aggregate.apply)) @@ -1466,8 +1457,7 @@ class SubFlow[In, Out, Mat]( */ def prefixAndTail(n: Int): SubFlow[ In, - akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], - Mat] = + akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], Mat] = new SubFlow(delegate.prefixAndTail(n).map { case (taken, tail) => akka.japi.Pair(taken.asJava, tail.asJava) }) /** @@ -1489,7 +1479,7 @@ class SubFlow[In, Out, Mat]( * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. 
- **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): SubFlow[In, Out2, Mat] = { @@ -1509,7 +1499,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes and all consumed substreams complete * * '''Cancels when''' downstream cancels - * */ def flatMapConcat[T, M](f: function.Function[Out, _ <: Graph[SourceShape[T], M]]): SubFlow[In, T, Mat] = new SubFlow(delegate.flatMapConcat(x => f(x))) @@ -1783,7 +1772,8 @@ class SubFlow[In, Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubFlow(delegate.mergeAll(seq, eagerComplete)) } @@ -1840,7 +1830,8 @@ class SubFlow[In, Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubFlow(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -2211,7 +2202,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttle(elements, per.asScala)) @@ -2250,7 +2240,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -2295,7 +2284,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -2334,7 +2322,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -2379,7 +2366,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -2428,7 +2414,6 @@ class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala index c22c2ae530..7e9824f707 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala @@ -156,7 +156,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: function.Procedure[Out]): SubSource[Out, Mat] = new SubSource(delegate.wireTap(f(_))) @@ -338,7 +337,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): SubSource[Out, Mat] = new SubSource(delegate.filter(p.test)) @@ -1021,7 +1019,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recover(pf: PartialFunction[Throwable, Out]): SubSource[Out, Mat] = new SubSource(delegate.recover(pf)) @@ -1042,7 +1039,6 @@ class SubSource[Out, Mat]( * '''Completes when''' 
upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ @deprecated("Use recoverWithRetries instead.", "2.4.4") def recoverWith(pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubSource[Out, Mat] = @@ -1067,7 +1063,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWithRetries( attempts: Int, @@ -1091,7 +1086,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): SubSource[Out, Mat] = new SubSource(delegate.mapError(pf)) @@ -1113,7 +1107,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable](clazz: Class[E], f: function.Function[E, Throwable]): javadsl.SubSource[Out, Mat] = mapError { @@ -1208,7 +1201,6 @@ class SubSource[Out, Mat]( * * @param seed Provides the first state for a conflated value using the first unconsumed element as a start * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflateWithSeed[S]( seed: function.Function[Out, S], @@ -1239,7 +1231,6 @@ class SubSource[Out, Mat]( * see also [[SubSource.conflateWithSeed]] [[SubSource.batch]] [[SubSource.batchWeighted]] * * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflate(aggregate: function.Function2[Out, Out, Out]): SubSource[Out, Mat] = new SubSource(delegate.conflate(aggregate.apply)) @@ -1442,8 +1433,7 @@ class SubSource[Out, Mat]( * '''Cancels when''' downstream cancels or substream cancels */ def prefixAndTail(n: Int): SubSource[ - akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], - Mat] = + akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], Mat] = new SubSource(delegate.prefixAndTail(n).map { case (taken, tail) => akka.japi.Pair(taken.asJava, tail.asJava) }) /** @@ -1465,7 +1455,7 @@ class SubSource[Out, Mat]( * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. 
- **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): javadsl.SubSource[Out2, Mat] = { @@ -1485,7 +1475,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes and all consumed substreams complete * * '''Cancels when''' downstream cancels - * */ def flatMapConcat[T, M](f: function.Function[Out, _ <: Graph[SourceShape[T], M]]): SubSource[T, Mat] = new SubSource(delegate.flatMapConcat(x => f(x))) @@ -1759,7 +1748,8 @@ class SubSource[Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubSource(delegate.mergeAll(seq, eagerComplete)) } @@ -1817,7 +1807,8 @@ class SubSource[Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubSource(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -2188,7 +2179,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttle(elements, per.asScala)) @@ -2227,7 +2217,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -2268,7 +2257,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -2307,7 +2295,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -2352,7 +2339,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") @@ -2401,7 +2387,6 @@ class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala index 3d447c9fa9..a73a924089 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala @@ -257,7 +257,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](flow1: Graph[FlowShape[I1, O1], M1], flow2: Graph[FlowShape[I2, O2], M2])( combine: (M1, M2) => M): BidiFlow[I1, O1, I2, O2, M] = { @@ -288,7 +287,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlows[I1, O1, I2, O2, M1, M2]( flow1: Graph[FlowShape[I1, O1], M1], diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala index a8e7b3c87f..6e72affe4d 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala @@ -54,7 +54,6 @@ object Compression { * * @param 
level Compression level (0-9) * @param nowrap if true then use GZIP compatible compression - * */ def deflate(level: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] = CompressionUtils.compressorFlow(() => new DeflateCompressor(level, nowrap)) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala index aed167c56d..cf375667e2 100755 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala @@ -854,7 +854,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recover[T >: Out](pf: PartialFunction[Throwable, T]): Repr[T] = via(Recover(pf)) @@ -876,7 +875,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ @deprecated("Use recoverWithRetries instead.", "2.4.4") def recoverWith[T >: Out](pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] = @@ -906,7 +904,6 @@ trait FlowOps[+Out, +Mat] { * * @param attempts Maximum number of retries or -1 to retry indefinitely * @param pf Receives the failure cause and returns the new Source to be materialized if any - * */ def recoverWithRetries[T >: Out]( attempts: Int, @@ -930,7 +927,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): Repr[Out] = via(MapError(pf)) @@ -947,7 +943,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def map[T](f: Out => T): Repr[T] = via(Map(f)) @@ -972,7 +967,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: Out => Unit): Repr[Out] = wireTap(Sink.foreach(f)).named("wireTap") @@ -993,7 +987,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes and all remaining elements have been emitted * * '''Cancels when''' downstream cancels - * */ def mapConcat[T](f: Out => IterableOnce[T]): Repr[T] = statefulMapConcat(() => f) @@ -2099,7 +2092,7 @@ trait FlowOps[+Out, +Mat] { * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. 
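The recoverWithRetries contract documented earlier in this file (a maximum number of attempts plus a partial function from the failure cause to a replacement source) can be exercised like this (a minimal sketch; the failing map is artificial):

import akka.stream.scaladsl.Source

// On the matched failure, switch at most once (attempts = 1) to a fallback
// source instead of failing downstream; -1 simply marks the fallback element.
val withFallback = Source(1 to 10)
  .map(n => if (n == 5) throw new IllegalStateException("boom") else n)
  .recoverWithRetries(1, { case _: IllegalStateException => Source.single(-1) })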
- **/ + */ def flatMapPrefix[Out2, Mat2](n: Int)(f: immutable.Seq[Out] => Flow[Out, Out2, Mat2]): Repr[Out2] = { via(new FlatMapPrefix(n, f)) } @@ -2511,7 +2504,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, mode: ThrottleMode): Repr[Out] = throttle(elements, per, maximumBurst, ConstantFun.oneInt, mode) @@ -2546,7 +2538,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(cost: Int, per: FiniteDuration, costCalculation: (Out) => Int): Repr[Out] = via(new Throttle(cost, per, Throttle.AutomaticMaximumBurst, costCalculation, ThrottleMode.Shaping)) @@ -2588,7 +2579,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -3256,7 +3246,6 @@ trait FlowOps[+Out, +Mat] { * * When needing a prepend operator that is not detached use [[#prependLazy]] * - * * '''Emits when''' element is available from the given [[Source]] or from current stream when the [[Source]] is completed * * '''Backpressures when''' downstream backpressures diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala index 6e4e8af653..93dc79d1c5 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala @@ -35,7 +35,6 @@ object FlowWithContext { * operations. * * An "empty" flow can be created by calling `FlowWithContext[Ctx, T]`. - * */ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: Flow[(In, CtxIn), (Out, CtxOut), Mat]) extends GraphDelegate(delegate) @@ -56,7 +55,7 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: Flow[(In d ~> unzip.in unzip.out0.via(viaFlow) ~> zipper.in0 - unzip.out1 ~> zipper.in1 + unzip.out1 ~> zipper.in1 FlowShape(d.in, zipper.out) })) @@ -90,7 +89,7 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: Flow[(In .create[Pair[JIn, JCtxIn]]() .map(_.toScala) .viaMat(delegate.map { - case (first, second) => - Pair[JOut, JCtxOut](first, second) - }.asJava, javadsl.Keep.right[NotUsed, JMat])) + case (first, second) => + Pair[JOut, JCtxOut](first, second) + }.asJava, javadsl.Keep.right[NotUsed, JMat])) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala index 691144ec97..3134f8bc85 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala @@ -20,7 +20,6 @@ import ccompat._ /** * Shared stream operations for [[FlowWithContext]] and [[SourceWithContext]] that automatically propagate a context * element with each data element. 
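* (So operators such as `map` or `filter` act on the data element, while its context is passed along unchanged.)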
- * */ @ccompatUsedUntil213 trait FlowWithContextOps[+Out, +Ctx, +Mat] { diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala index 1526eb0d70..4e87dacbc5 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala @@ -84,7 +84,6 @@ object Framing { * Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`. * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise. * Must not mutate the given byte array. - * */ def lengthField( fieldLength: Int, @@ -307,10 +306,11 @@ object Framing { if (isClosed(in) && buffer.isEmpty) completeStage() } else { // Emit results and compact buffer - emitMultiple(out, new FrameIterator(), () => { - reset() - if (isClosed(in) && buffer.isEmpty) completeStage() - }) + emitMultiple(out, new FrameIterator(), + () => { + reset() + if (isClosed(in) && buffer.isEmpty) completeStage() + }) } private def reset(): Unit = { @@ -371,7 +371,7 @@ object Framing { computeFrameSize: Option[(Array[Byte], Int) => Int]) extends GraphStage[FlowShape[ByteString, ByteString]] { - //for the sake of binary compatibility + // for the sake of binary compatibility def this(lengthFieldLength: Int, lengthFieldOffset: Int, maximumFrameLength: Int, byteOrder: ByteOrder) = this(lengthFieldLength, lengthFieldOffset, maximumFrameLength, byteOrder, None) @@ -393,7 +393,6 @@ object Framing { /** * push, and reset frameSize and buffer - * */ private def pushFrame() = { val emit = buffer.take(frameSize).compact @@ -407,7 +406,6 @@ object Framing { /** * try to push downstream, if failed then try to pull upstream - * */ private def tryPushFrame() = { val buffSize = buffer.size diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala index 529fc6c1f5..42272b555b 100755 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala @@ -229,7 +229,7 @@ final class MergePreferred[T](val secondaryPorts: Int, val eagerComplete: Boolea } override def preStart(): Unit = { - //while initializing this `MergePreferredShape`, the `preferred` port gets added to `inlets` by side-effect. + // while initializing this `MergePreferredShape`, the `preferred` port gets added to `inlets` by side-effect. 
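// since the preferred port is thus part of `shape.inlets`, the eager pull below covers it together with the secondary ports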
shape.inlets.foreach(tryPull) } @@ -451,7 +451,6 @@ object Interleave { * '''Completes when''' all upstreams complete (eagerClose=false) or one upstream completes (eagerClose=true) * * '''Cancels when''' downstream cancels - * */ final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerClose: Boolean) extends GraphStage[UniformFanInShape[T, T]] { @@ -568,9 +567,9 @@ final class MergeSorted[T: Ordering] extends GraphStage[FanInShape2[T, T, T]] { // all fan-in stages need to eagerly pull all inputs to get cycles started pull(right) read(left)(l => { - other = l - readR() - }, () => passAlong(right, out)) + other = l + readR() + }, () => passAlong(right, out)) } } } @@ -599,7 +598,6 @@ object Broadcast { * * '''Cancels when''' * If eagerCancel is enabled: when any downstream cancels; otherwise: when all downstreams cancel - * */ final class Broadcast[T](val outputPorts: Int, val eagerCancel: Boolean) extends GraphStage[UniformFanOutShape[T, T]] { // one output might seem counter intuitive but saves us from special handling in other places @@ -696,7 +694,6 @@ object WireTap { * '''Completes when''' upstream completes * * '''Cancels when''' the 'main' output cancels - * */ @InternalApi private[stream] final class WireTap[T] extends GraphStage[FanOutShape2[T, T, T]] { @@ -709,27 +706,29 @@ private[stream] final class WireTap[T] extends GraphStage[FanOutShape2[T, T, T]] override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var pendingTap: Option[T] = None - setHandler(in, new InHandler { - override def onPush() = { - val elem = grab(in) - push(outMain, elem) - if (isAvailable(outTap)) { - push(outTap, elem) - } else { - pendingTap = Some(elem) + setHandler(in, + new InHandler { + override def onPush() = { + val elem = grab(in) + push(outMain, elem) + if (isAvailable(outTap)) { + push(outTap, elem) + } else { + pendingTap = Some(elem) + } } - } - }) + }) - setHandler(outMain, new OutHandler { - override def onPull() = { - pull(in) - } + setHandler(outMain, + new OutHandler { + override def onPull() = { + pull(in) + } - override def onDownstreamFinish(cause: Throwable): Unit = { - cancelStage(cause) - } - }) + override def onDownstreamFinish(cause: Throwable): Unit = { + cancelStage(cause) + } + }) // The 'tap' output can neither backpressure, nor cancel, the stage. setHandler( @@ -745,11 +744,12 @@ private[stream] final class WireTap[T] extends GraphStage[FanOutShape2[T, T, T]] } override def onDownstreamFinish(cause: Throwable): Unit = { - setHandler(in, new InHandler { - override def onPush() = { - push(outMain, grab(in)) - } - }) + setHandler(in, + new InHandler { + override def onPush() = { + push(outMain, grab(in)) + } + }) // Allow any outstanding element to be garbage-collected pendingTap = None } @@ -770,7 +770,8 @@ object Partition { * * @param outputPorts number of output ports * @param partitioner function deciding which output each element will be targeted - */ // FIXME BC add `eagerCancel: Boolean = false` parameter + */ + // FIXME BC add `eagerCancel: Boolean = false` parameter def apply[T](outputPorts: Int, partitioner: T => Int): Partition[T] = new Partition(outputPorts, partitioner, false) } @@ -1237,19 +1238,20 @@ class ZipWithN[A, O](zipper: immutable.Seq[A] => O)(n: Int) extends GraphStage[U shape.inlets.zipWithIndex.foreach { case (in, i) => - setHandler(in, new InHandler { - override def onPush(): Unit = { - // Only one context can be propagated. 
Picked the first element as an arbitrary but deterministic choice. - if (i == 0) contextPropagation.suspendContext() - pending -= 1 - if (pending == 0) pushAll() - } + setHandler(in, + new InHandler { + override def onPush(): Unit = { + // Only one context can be propagated. Picked the first element as an arbitrary but deterministic choice. + if (i == 0) contextPropagation.suspendContext() + pending -= 1 + if (pending == 0) pushAll() + } - override def onUpstreamFinish(): Unit = { - if (!isAvailable(in)) completeStage() - willShutDown = true - } - }) + override def onUpstreamFinish(): Unit = { + if (!isAvailable(in)) completeStage() + willShutDown = true + } + }) } def onPull(): Unit = { @@ -1422,15 +1424,16 @@ private[stream] final class OrElse[T] extends GraphStage[UniformFanInShape[T, T] } } - setHandler(secondary, new InHandler { - override def onPush(): Unit = { - push(out, grab(secondary)) - } + setHandler(secondary, + new InHandler { + override def onPush(): Unit = { + push(out, grab(secondary)) + } - override def onUpstreamFinish(): Unit = { - if (isClosed(primary)) completeStage() - } - }) + override def onUpstreamFinish(): Unit = { + if (isClosed(primary)) completeStage() + } + }) setHandlers(primary, out, this) } @@ -1445,7 +1448,7 @@ object MergeSequence { private implicit def ordering[T]: Ordering[Pushed[T]] = Ordering.by[Pushed[T], Long](_.sequence).reverse - /** @see [[MergeSequence]] **/ + /** @see [[MergeSequence]] */ def apply[T](inputPorts: Int = 2)(extractSequence: T => Long): Graph[UniformFanInShape[T, T], NotUsed] = GraphStages.withDetachedInputs(new MergeSequence[T](inputPorts)(extractSequence)) } @@ -1675,9 +1678,10 @@ object GraphDSL extends GraphApply { * @return The outlet that will emit the materialized value. */ def materializedValue: Outlet[M @uncheckedVariance] = - add(Source.maybe[M], { (prev: M, prom: Promise[Option[M]]) => - prom.success(Some(prev)); prev - }).out + add(Source.maybe[M], + { (prev: M, prom: Promise[Option[M]]) => + prom.success(Some(prev)); prev + }).out private[GraphDSL] def traversalBuilder: TraversalBuilder = traversalBuilderInProgress diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala index 20d166631e..7011f8496c 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala @@ -42,7 +42,6 @@ object MergeHub { /** * Set the operation mode of the linked MergeHub to draining. In this mode the Hub will cancel any new producer and * will complete as soon as all the currently connected producers complete. - * */ def drainAndComplete(): Unit } @@ -172,10 +171,9 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int, drainingEnabled: Boo @volatile private[this] var draining = false private[this] val demands = scala.collection.mutable.LongMap.empty[InputState] - private[this] val wakeupCallback = getAsyncCallback[NotUsed]( - (_) => - // We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details. - if (isAvailable(out)) tryProcessNext(firstAttempt = true)) + private[this] val wakeupCallback = getAsyncCallback[NotUsed](_ => + // We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details.
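// (`isAvailable(out)` holds exactly when downstream has pulled and is waiting, i.e. we are not backpressured)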
+ if (isAvailable(out)) tryProcessNext(firstAttempt = true)) private[MergeHub] val drainingCallback: Option[AsyncCallback[NotUsed]] = { // Only create an async callback if the draining support is enabled in order to avoid book-keeping costs. @@ -430,7 +428,6 @@ object BroadcastHub { * all corresponding [[Source]]s are completed. Both failure and normal completion is "remembered" and later * materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are * cancelled are simply removed from the dynamic set of consumers. - * */ def sink[T]: Sink[T, Source[T, NotUsed]] = sink(bufferSize = defaultBufferSize) @@ -596,7 +593,7 @@ private[akka] class BroadcastHub[T](bufferSize: Int) // TODO: Try to eliminate modulo division somehow... val wheelSlot = offset & WheelMask var consumersInSlot = consumerWheel(wheelSlot) - //debug(s"consumers before removal $consumersInSlot") + // debug(s"consumers before removal $consumersInSlot") var remainingConsumersInSlot: List[Consumer] = Nil var removedConsumer: Consumer = null diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala index 30abb40c3c..e48dcc9c66 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala @@ -75,9 +75,10 @@ object JsonFraming { def tryPopBuffer(): Unit = { try buffer.poll() match { - case Some(json) => push(out, json) - case _ => if (isClosed(in)) complete() else pull(in) - } catch { + case Some(json) => push(out, json) + case _ => if (isClosed(in)) complete() else pull(in) + } + catch { case NonFatal(ex) => failStage(ex) } } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala index feafc94d0e..41b5520e8e 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala @@ -20,7 +20,6 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } * '''Completes when''' all upstreams complete (eagerClose=false) or one upstream completes (eagerClose=true) * * '''Cancels when''' downstream cancels - * */ object MergeLatest { diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala index 826d57e05f..1615ade546 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala @@ -299,13 +299,14 @@ private abstract class RestartWithBackoffLogic[S <: Shape]( } }) - setHandler(out, new OutHandler { - override def onPull() = sinkIn.pull() - override def onDownstreamFinish(cause: Throwable) = { - finishing = true - sinkIn.cancel(cause) - } - }) + setHandler(out, + new OutHandler { + override def onPull() = sinkIn.pull() + override def onDownstreamFinish(cause: Throwable) = { + finishing = true + sinkIn.cancel(cause) + } + }) sinkIn } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala index 55aff959a7..312d0908ac 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala @@ -116,7 +116,6 @@ object RestartSource { * random delay based on this factor is added, e.g. 
`0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -147,7 +146,6 @@ object RestartSource { * @param maxRestarts the amount of restarts is capped to this amount within a time frame of minBackoff. * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -174,7 +172,6 @@ object RestartSource { * * @param settings [[RestartSettings]] defining restart configuration * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ def onFailuresWithBackoff[T](settings: RestartSettings)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = Source.fromGraph(new RestartWithBackoffSource(sourceFactory, settings, onlyOnFailures = true)) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala index 80b8611ae8..5d5558fa2d 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala @@ -163,11 +163,11 @@ object Sink { */ def fromMaterializer[T, M](factory: (Materializer, Attributes) => Sink[T, M]): Sink[T, Future[M]] = Flow - .fromMaterializer({ (mat, attr) => + .fromMaterializer { (mat, attr) => Flow.fromGraph(GraphDSL.createGraph(factory(mat, attr)) { b => sink => FlowShape(sink.in, b.materializedValue.outlet) }) - }) + } .to(Sink.head) /** @@ -303,7 +303,7 @@ object Sink { /** * A [[Sink]] that will always backpressure never cancel and never consume any elements from the stream. - * */ + */ def never: Sink[Any, Future[Done]] = _never private[this] val _never: Sink[Any, Future[Done]] = fromGraph(GraphStages.NeverSink) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala index 9365a2fe7e..e51a32897c 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala @@ -52,10 +52,10 @@ final class Source[+Out, +Mat]( combine: (Mat, Mat2) => Mat3): Source[T, Mat3] = { if (flow.traversalBuilder eq Flow.identityTraversalBuilder) if (combine == Keep.left) - //optimization by returning this - this.asInstanceOf[Source[T, Mat3]] //Mat == Mat3, due to Keep.left + // optimization by returning this + this.asInstanceOf[Source[T, Mat3]] // Mat == Mat3, due to Keep.left else if (combine == Keep.right || combine == Keep.none) // Mat3 = NotUsed - //optimization with LinearTraversalBuilder.empty() + // optimization with LinearTraversalBuilder.empty() new Source[T, Mat3]( traversalBuilder.append(LinearTraversalBuilder.empty(), flow.shape, combine), SourceShape(shape.out).asInstanceOf[SourceShape[T]]) @@ -235,7 +235,7 @@ final class Source[+Out, +Mat]( /** * Transform this source whose element is ``e`` into a source producing tuple ``(e, f(e))`` - **/ + */ def asSourceWithContext[Ctx](f: Out => Ctx): SourceWithContext[Out, Ctx, Mat] = new SourceWithContext(this.map(e => (e, f(e)))) @@ -676,17 +676,16 @@ object Source { * * See also [[akka.stream.scaladsl.Source.queue]]. 
* - * * @param bufferSize The size of the buffer in element count * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer */ @deprecated("Use variant accepting completion and failure matchers instead", "2.6.0") def actorRef[T](bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = actorRef({ - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause }, bufferSize, overflowStrategy) + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, { case akka.actor.Status.Failure(cause) => cause }, bufferSize, overflowStrategy) /** * INTERNAL API @@ -741,11 +740,12 @@ object Source { */ @deprecated("Use actorRefWithBackpressure accepting completion and failure matchers instead", "2.6.0") def actorRefWithAck[T](ackMessage: Any): Source[T, ActorRef] = - actorRefWithAck(None, ackMessage, { - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause }) + actorRefWithAck(None, ackMessage, + { + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, { case akka.actor.Status.Failure(cause) => cause }) /** * Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. 
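*
* A usage sketch (the two sources below are invented for illustration; `Merge` emits elements as they
* arrive, whereas `Concat` would drain `first` before `second`):
* {{{
* val first = Source(1 to 3)
* val second = Source(4 to 6)
* Source.combine(first, second)(Merge(_)).runWith(Sink.foreach(println))
* }}}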
@@ -755,7 +755,7 @@ object Source { Source.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val c = b.add(strategy(rest.size + 2)) - first ~> c.in(0) + first ~> c.in(0) second ~> c.in(1) @tailrec def combineRest(idx: Int, i: Iterator[Source[T, _]]): SourceShape[U] = diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala index 3eba12466b..1418a4e801 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala @@ -43,7 +43,7 @@ final class SourceWithContext[+Out, +Ctx, +Mat] private[stream] (delegate: Sourc d ~> unzip.in unzip.out0.via(viaFlow) ~> zipper.in0 - unzip.out1 ~> zipper.in1 + unzip.out1 ~> zipper.in1 SourceShape(zipper.out) })) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala index e3defd9c26..9622dcac39 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala @@ -108,9 +108,9 @@ object StreamConverters { def javaCollector[T, R](collectorFactory: () => java.util.stream.Collector[T, _ <: Any, R]): Sink[T, Future[R]] = Flow[T] .fold { - new FirstCollectorState[T, R](collectorFactory.asInstanceOf[() => java.util.stream.Collector[T, Any, R]]): CollectorState[ - T, - R] + new FirstCollectorState[T, + R](collectorFactory.asInstanceOf[() => java.util.stream.Collector[T, Any, R]]): CollectorState[ + T, R] } { (state, elem) => state.update(elem) } @@ -178,29 +178,28 @@ object StreamConverters { // TODO removing the QueueSink name, see issue #22523 Sink .fromGraph(new QueueSink[T](1).withAttributes(Attributes.none)) - .mapMaterializedValue( - queue => - StreamSupport - .stream( - Spliterators.spliteratorUnknownSize( - new java.util.Iterator[T] { - var nextElementFuture: Future[Option[T]] = queue.pull() - var nextElement: Option[T] = _ + .mapMaterializedValue(queue => + StreamSupport + .stream( + Spliterators.spliteratorUnknownSize( + new java.util.Iterator[T] { + var nextElementFuture: Future[Option[T]] = queue.pull() + var nextElement: Option[T] = _ - override def hasNext: Boolean = { - nextElement = Await.result(nextElementFuture, Inf) - nextElement.isDefined - } + override def hasNext: Boolean = { + nextElement = Await.result(nextElementFuture, Inf) + nextElement.isDefined + } - override def next(): T = { - val next = nextElement.get - nextElementFuture = queue.pull() - next - } - }, - 0), - false) - .onClose(new Runnable { def run = queue.cancel() })) + override def next(): T = { + val next = nextElement.get + nextElementFuture = queue.pull() + next + } + }, + 0), + false) + .onClose(new Runnable { def run = queue.cancel() })) .withAttributes(DefaultAttributes.asJavaStream) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala index 17acac9016..929430fc11 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala @@ -87,10 +87,11 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { // just wraps/unwraps the TLS byte events to provide ByteString, ByteString flows private val tlsWrapping: BidiFlow[ByteString, TLSProtocol.SendBytes, TLSProtocol.SslTlsInbound, ByteString, NotUsed] = - 
BidiFlow.fromFlows(Flow[ByteString].map(TLSProtocol.SendBytes.apply), Flow[TLSProtocol.SslTlsInbound].collect { - case sb: TLSProtocol.SessionBytes => sb.bytes - // ignore other kinds of inbounds (currently only Truncated) - }) + BidiFlow.fromFlows(Flow[ByteString].map(TLSProtocol.SendBytes.apply), + Flow[TLSProtocol.SslTlsInbound].collect { + case sb: TLSProtocol.SessionBytes => sb.bytes + // ignore other kinds of inbounds (currently only Truncated) + }) /** * INTERNAL API diff --git a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala index a3690e6c00..7337d80db9 100644 --- a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala +++ b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala @@ -140,7 +140,6 @@ sealed trait RunningInterpreter extends InterpreterSnapshot { } /** - * * Not for user extension */ @DoNotInherit @ApiMayChange diff --git a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala index e22e887b96..bbbe0be274 100644 --- a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala +++ b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala @@ -606,11 +606,13 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: connection.slot match { case Empty | _ @(_: Cancelled) => false // cancelled (element is discarded when cancelled) case _ => true // completed but element still there to grab - } else if ((connection.portState & (InReady | InFailed)) == (InReady | InFailed)) + } + else if ((connection.portState & (InReady | InFailed)) == (InReady | InFailed)) connection.slot match { case Failed(_, elem) => elem.asInstanceOf[AnyRef] ne Empty // failed but element still there to grab case _ => false - } else false + } + else false } } @@ -782,14 +784,14 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * the `onClose` function is invoked with the elements which were read. 
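*
* A small sketch of the intended use inside a `GraphStageLogic` (the `in`/`out` ports are assumed):
* {{{
* readN(in, 3)(all => emitMultiple(out, all), partial => emitMultiple(out, partial))
* }}}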
*/ final protected def readN[T](in: Inlet[T], n: Int)(andThen: Seq[T] => Unit, onClose: Seq[T] => Unit): Unit = - //FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity + // FIXME `onClose` is a poor name for `onComplete`; rename this at the earliest possible opportunity if (n < 0) throw new IllegalArgumentException("cannot read negative number of elements") else if (n == 0) andThen(Nil) else { val result = new Array[AnyRef](n).asInstanceOf[Array[T]] var pos = 0 - if (isAvailable(in)) { //If we already have data available, then shortcircuit and read the first + if (isAvailable(in)) { // If we already have data available, then short-circuit and read the first result(pos) = grab(in) pos += 1 } @@ -797,11 +799,12 @@ if (n != pos) { // If we aren't already done requireNotReading(in) if (!hasBeenPulled(in)) pull(in) - setHandler(in, new Reading(in, n - pos, getHandler(in))((elem: T) => { - result(pos) = elem - pos += 1 - if (pos == n) andThen(result.toSeq) - }, () => onClose(result.take(pos).toSeq))) + setHandler(in, + new Reading(in, n - pos, getHandler(in))((elem: T) => { + result(pos) = elem + pos += 1 + if (pos == n) andThen(result.toSeq) + }, () => onClose(result.take(pos).toSeq))) } else andThen(result.toSeq) } @@ -816,7 +819,7 @@ n: Int, andThen: Procedure[java.util.List[T]], onClose: Procedure[java.util.List[T]]): Unit = { - //FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity + // FIXME `onClose` is a poor name for `onComplete`; rename this at the earliest possible opportunity import akka.util.ccompat.JavaConverters._ readN(in, n)(seq => andThen(seq.asJava), seq => onClose(seq.asJava)) } @@ -1232,7 +1235,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: Future.failed(streamDetachedException) } - //external call + // external call override def invoke(event: T): Unit = invokeWithPromise(event, NoPromise) @tailrec @@ -1427,22 +1430,23 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: private var closed = false private var pulled = false - private val _sink = new SubSink[T](name, getAsyncCallback[ActorSubscriberMessage] { msg => - if (!closed) msg match { - case OnNext(e) => - elem = e.asInstanceOf[T] - pulled = false - handler.onPush() - case OnComplete => - closed = true - handler.onUpstreamFinish() - GraphStageLogic.this.completedOrFailed(this) - case OnError(ex) => - closed = true - handler.onUpstreamFailure(ex) - GraphStageLogic.this.completedOrFailed(this) - } - }.invoke _) + private val _sink = new SubSink[T](name, + getAsyncCallback[ActorSubscriberMessage] { msg => + if (!closed) msg match { + case OnNext(e) => + elem = e.asInstanceOf[T] + pulled = false + handler.onPush() + case OnComplete => + closed = true + handler.onUpstreamFinish() + GraphStageLogic.this.completedOrFailed(this) + case OnError(ex) => + closed = true + handler.onUpstreamFailure(ex) + GraphStageLogic.this.completedOrFailed(this) + } + }.invoke _) GraphStageLogic.this.created(this) @@ -1924,7 +1928,7 @@ trait OutHandler { require(cause ne null, "Cancellation cause must not be null") require(thisStage.lastCancellationCause eq null, "onDownstreamFinish(cause) must not be called recursively") thisStage.lastCancellationCause = cause - (onDownstreamFinish(): @nowarn("msg=deprecated")) // if not overridden,
call old deprecated variant + onDownstreamFinish(): @nowarn("msg=deprecated") // if not overridden, call old deprecated variant } finally thisStage.lastCancellationCause = null } } diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index 9729bda0a0..a452bfe0c7 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -302,20 +302,22 @@ class CallingThreadDispatcher(_configurator: MessageDispatcherConfigurator) exte // this actors mailbox at some other level on our call stack if (!mbox.ctdLock.isHeldByCurrentThread) { var intex = interruptedEx - val gotLock = try { - mbox.ctdLock.tryLock(50, TimeUnit.MILLISECONDS) - } catch { - case ie: InterruptedException => - Thread.interrupted() // clear interrupted flag before we continue, exception will be thrown later - intex = ie - false - } - if (gotLock) { - val ie = try { - process(intex) - } finally { - mbox.ctdLock.unlock + val gotLock = + try { + mbox.ctdLock.tryLock(50, TimeUnit.MILLISECONDS) + } catch { + case ie: InterruptedException => + Thread.interrupted() // clear interrupted flag before we continue, exception will be thrown later + intex = ie + false } + if (gotLock) { + val ie = + try { + process(intex) + } finally { + mbox.ctdLock.unlock + } throwInterruptionIfExistsOrSet(ie) } else { // if we didn't get the lock and our mailbox still has messages, then we need to try again diff --git a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala index f6b23570bf..2fe5cda8f2 100644 --- a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala +++ b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala @@ -91,14 +91,15 @@ object SocketUtil { val addr = new InetSocketAddress(address, 0) try if (udp) { - val ds = DatagramChannel.open().socket() - ds.bind(addr) - (ds, new InetSocketAddress(address, ds.getLocalPort)) - } else { - val ss = ServerSocketChannel.open().socket() - ss.bind(addr) - (ss, new InetSocketAddress(address, ss.getLocalPort)) - } catch { + val ds = DatagramChannel.open().socket() + ds.bind(addr) + (ds, new InetSocketAddress(address, ds.getLocalPort)) + } else { + val ss = ServerSocketChannel.open().socket() + ss.bind(addr) + (ss, new InetSocketAddress(address, ss.getLocalPort)) + } + catch { case NonFatal(ex) => throw new RuntimeException(s"Binding to $addr failed with ${ex.getMessage}", ex) } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 1cd11085f2..7bbb93b186 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -25,44 +25,44 @@ import akka.pattern.ask @nowarn // 'early initializers' are deprecated on 2.13 and will be replaced with trait parameters on 2.14. 
https://github.com/akka/akka/issues/26753 class TestActorRef[T <: Actor](_system: ActorSystem, _props: Props, _supervisor: ActorRef, name: String) extends LocalActorRef({ - val disregard = _supervisor match { - case l: LocalActorRef => l.underlying.reserveChild(name) - case r: RepointableActorRef => - r.underlying match { - case _: UnstartedCell => - throw new IllegalStateException( - "cannot attach a TestActor to an unstarted top-level actor, ensure that it is started by sending a message and observing the reply") - case c: ActorCell => c.reserveChild(name) - case o => - _system.log.error( - "trying to attach child {} to unknown type of supervisor cell {}, this is not going to end well", - name, - o.getClass) - } - case s => - _system.log.error( - "trying to attach child {} to unknown type of supervisor {}, this is not going to end well", - name, - s.getClass) - } + val disregard = _supervisor match { + case l: LocalActorRef => l.underlying.reserveChild(name) + case r: RepointableActorRef => + r.underlying match { + case _: UnstartedCell => + throw new IllegalStateException( + "cannot attach a TestActor to an unstarted top-level actor, ensure that it is started by sending a message and observing the reply") + case c: ActorCell => c.reserveChild(name) + case o => + _system.log.error( + "trying to attach child {} to unknown type of supervisor cell {}, this is not going to end well", + name, + o.getClass) + } + case s => + _system.log.error( + "trying to attach child {} to unknown type of supervisor {}, this is not going to end well", + name, + s.getClass) + } - _system.asInstanceOf[ActorSystemImpl] - }, { - _props.withDispatcher( - if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id - else _props.dispatcher) - }, { - val props = _props.withDispatcher( - if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id - else _props.dispatcher) - _system.dispatchers.lookup(props.dispatcher) - }, { - val props = _props.withDispatcher( - if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id - else _props.dispatcher) - val dispatcher = _system.dispatchers.lookup(props.dispatcher) - _system.mailboxes.getMailboxType(props, dispatcher.configurator.config) - }, _supervisor.asInstanceOf[InternalActorRef], _supervisor.path / name) { + _system.asInstanceOf[ActorSystemImpl] + }, { + _props.withDispatcher( + if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id + else _props.dispatcher) + }, { + val props = _props.withDispatcher( + if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id + else _props.dispatcher) + _system.dispatchers.lookup(props.dispatcher) + }, { + val props = _props.withDispatcher( + if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id + else _props.dispatcher) + val dispatcher = _system.dispatchers.lookup(props.dispatcher) + _system.mailboxes.getMailboxType(props, dispatcher.configurator.config) + }, _supervisor.asInstanceOf[InternalActorRef], _supervisor.path / name) { val props = _props.withDispatcher( if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id @@ -193,37 +193,37 @@ object TestActorRef { } def apply[T <: Actor](name: String)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = - apply[T](Props({ - system - .asInstanceOf[ExtendedActorSystem] - .dynamicAccess - .createInstanceFor[T](t.runtimeClass, Nil) - .recover(dynamicCreateRecover) - .get - }), name) - - 
def apply[T <: Actor](supervisor: ActorRef)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = - apply[T](Props({ - system - .asInstanceOf[ExtendedActorSystem] - .dynamicAccess - .createInstanceFor[T](t.runtimeClass, Nil) - .recover(dynamicCreateRecover) - .get - }), supervisor) - - def apply[T <: Actor](supervisor: ActorRef, name: String)( - implicit t: ClassTag[T], - system: ActorSystem): TestActorRef[T] = - apply[T]( - Props({ + apply[T](Props { system .asInstanceOf[ExtendedActorSystem] .dynamicAccess .createInstanceFor[T](t.runtimeClass, Nil) .recover(dynamicCreateRecover) .get - }), + }, name) + + def apply[T <: Actor](supervisor: ActorRef)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = + apply[T](Props { + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[T](t.runtimeClass, Nil) + .recover(dynamicCreateRecover) + .get + }, supervisor) + + def apply[T <: Actor](supervisor: ActorRef, name: String)( + implicit t: ClassTag[T], + system: ActorSystem): TestActorRef[T] = + apply[T]( + Props { + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[T](t.runtimeClass, Nil) + .recover(dynamicCreateRecover) + .get + }, supervisor, name) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index cf1011c07e..43d8d2e3f3 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -183,9 +183,9 @@ trait TestKitBase { TestActor.props(queue).withDispatcher(CallingThreadDispatcher.Id), "%s-%d".format(testActorName, TestKit.testActorId.incrementAndGet)) awaitCond(ref match { - case r: RepointableRef => r.isStarted - case _ => true - }, 3.seconds.dilated, 10.millis) + case r: RepointableRef => r.isStarted + case _ => true + }, 3.seconds.dilated, 10.millis) ref } @@ -415,8 +415,9 @@ trait TestKitBase { val prev_end = end end = start + max_diff - val ret = try f - finally end = prev_end + val ret = + try f + finally end = prev_end val diff = now - start assert(min <= diff, s"block took ${format(min.unit, diff)}, should at least have been $min") @@ -750,8 +751,8 @@ trait TestKitBase { var elem: AnyRef = queue.peekFirst() var left = leftNow while (left.toNanos > 0 && elem == null) { - //Use of (left / 2) gives geometric series limited by finish time similar to (1/2)^n limited by 1, - //so it is very precise + // Use of (left / 2) gives geometric series limited by finish time similar to (1/2)^n limited by 1, + // so it is very precise Thread.sleep(pollInterval.toMillis min (left / 2).toMillis) left = leftNow if (left.toNanos > 0) { diff --git a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala index ca5592ef4f..736379a185 100644 --- a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala @@ -5,7 +5,7 @@ package akka.testkit.javadsl import java.util.{ List => JList } -import java.util.function.{ Supplier, Function => JFunction } +import java.util.function.{ Function => JFunction, Supplier } import scala.annotation.varargs import scala.concurrent.duration._ @@ -37,8 +37,6 @@ import akka.util.ccompat.JavaConverters._ * are scaled using the `dilated` method, which uses the * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry * "akka.test.timefactor". 
- * - * */ class TestKit(system: ActorSystem) { @@ -799,15 +797,14 @@ class TestKit(system: ActorSystem) { * * One possible use of this method is for testing whether messages of * certain characteristics are generated at a certain rate: - * */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.13") def receiveWhile[T](max: Duration, idle: Duration, messages: Int, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max, idle, messages)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }) + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) .asJava } @@ -821,7 +818,6 @@ class TestKit(system: ActorSystem) { * * One possible use of this method is for testing whether messages of * certain characteristics are generated at a certain rate: - * */ def receiveWhile[T]( max: java.time.Duration, @@ -829,9 +825,9 @@ class TestKit(system: ActorSystem) { messages: Int, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max.asScala, idle.asScala, messages)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }) + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) .asJava } @@ -839,17 +835,17 @@ class TestKit(system: ActorSystem) { @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.13") def receiveWhile[T](max: Duration, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max = max)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }) + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) .asJava } def receiveWhile[T](max: java.time.Duration, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max = max.asScala)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }) + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) .asJava } diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala index 9b14be3aa5..e385245796 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala @@ -70,11 +70,11 @@ class AkkaSpecSpec extends AnyWordSpec with Matchers { var locker = Seq.empty[DeadLetter] implicit val timeout: Timeout = TestKitExtension(system).DefaultTimeout.duration.dilated(system) val davyJones = otherSystem.actorOf(Props(new Actor { - def receive = { - case m: DeadLetter => locker :+= m - case "Die!" => sender() ! "finally gone"; context.stop(self) - } - }), "davyJones") + def receive = { + case m: DeadLetter => locker :+= m + case "Die!" => sender() ! 
"finally gone"; context.stop(self) + } + }), "davyJones") system.eventStream.subscribe(davyJones, classOf[DeadLetter]) diff --git a/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala b/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala index 67f46bb6da..9b75de2cc2 100644 --- a/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala @@ -70,27 +70,28 @@ class CoronerSpec extends AnyWordSpec with Matchers { def lockingThread(name: String, initialLocks: List[ReentrantLock]): LockingThread = { val ready = new Semaphore(0) val proceed = new Semaphore(0) - val t = new Thread(new Runnable { - def run = - try recursiveLock(initialLocks) - catch { case _: InterruptedException => () } + val t = new Thread( + new Runnable { + def run = + try recursiveLock(initialLocks) + catch { case _: InterruptedException => () } - def recursiveLock(locks: List[ReentrantLock]): Unit = { - locks match { - case Nil => () - case lock :: rest => { - ready.release() - proceed.acquire() - lock.lockInterruptibly() // Allows us to break deadlock and free threads - try { - recursiveLock(rest) - } finally { - lock.unlock() + def recursiveLock(locks: List[ReentrantLock]): Unit = { + locks match { + case Nil => () + case lock :: rest => { + ready.release() + proceed.acquire() + lock.lockInterruptibly() // Allows us to break deadlock and free threads + try { + recursiveLock(rest) + } finally { + lock.unlock() + } } } } - } - }, name) + }, name) t.start() LockingThread(name, t, ready, proceed) } diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 40e3bc0741..e6224bf16b 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -125,9 +125,9 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA val nested = TestActorRef(Props(new Actor { def receive = { case _ => } })) def receive = { case _ => sender() ! nested } })) - a should not be (null) + a should not be null val nested = Await.result((a ? "any").mapTo[ActorRef], timeout.duration) - nested should not be (null) + nested should not be null a should not be theSameInstanceAs(nested) } @@ -136,9 +136,9 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA val nested = context.actorOf(Props(new Actor { def receive = { case _ => } })) def receive = { case _ => sender() ! nested } })) - a should not be (null) + a should not be null val nested = Await.result((a ? 
"any").mapTo[ActorRef], timeout.duration) - nested should not be (null) + nested should not be null a should not be theSameInstanceAs(nested) } @@ -194,10 +194,10 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA val boss = TestActorRef(Props(new TActor { val ref = TestActorRef(Props(new TActor { - def receiveT = { case _ => } - override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { counter -= 1 } - override def postRestart(reason: Throwable): Unit = { counter -= 1 } - }), self, "child") + def receiveT = { case _ => } + override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { counter -= 1 } + override def postRestart(reason: Throwable): Unit = { counter -= 1 } + }), self, "child") override def supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 1 second)(List(classOf[ActorKilledException])) @@ -274,11 +274,11 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA EventFilter[RuntimeException](occurrences = 1, message = "expected").intercept { val parent = TestProbe() val child = TestActorRef(Props(new Actor { - def receive: Receive = { - case 1 => throw new RuntimeException("expected") - case x => sender() ! x - } - }), parent.ref, "Child") + def receive: Receive = { + case 1 => throw new RuntimeException("expected") + case x => sender() ! x + } + }), parent.ref, "Child") child ! 1 } @@ -287,11 +287,11 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA EventFilter[RuntimeException](occurrences = 1, message = "expected").intercept { val parent = TestProbe() val child = parent.childActorOf(Props(new Actor { - def receive: Receive = { - case 1 => throw new RuntimeException("expected") - case x => sender() ! x - } - }), "Child") + def receive: Receive = { + case 1 => throw new RuntimeException("expected") + case x => sender() ! x + } + }), "Child") child ! 1 } diff --git a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala index 88ca8fa5c6..f0e6a38dce 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala @@ -16,15 +16,15 @@ class TestFSMRefSpec extends AkkaSpec { "allow access to state data" in { val fsm = TestFSMRef(new Actor with FSM[Int, String] { - startWith(1, "") - when(1) { - case Event("go", _) => goto(2).using("go") - case Event(StateTimeout, _) => goto(2).using("timeout") - } - when(2) { - case Event("back", _) => goto(1).using("back") - } - }, "test-fsm-ref-1") + startWith(1, "") + when(1) { + case Event("go", _) => goto(2).using("go") + case Event(StateTimeout, _) => goto(2).using("timeout") + } + when(2) { + case Event("back", _) => goto(1).using("back") + } + }, "test-fsm-ref-1") fsm.stateName should ===(1) fsm.stateData should ===("") fsm ! 
"go" @@ -44,11 +44,11 @@ class TestFSMRefSpec extends AkkaSpec { "allow access to timers" in { val fsm = TestFSMRef(new Actor with FSM[Int, Null] { - startWith(1, null) - when(1) { - case _ => stay() - } - }, "test-fsm-ref-2") + startWith(1, null) + when(1) { + case _ => stay() + } + }, "test-fsm-ref-2") fsm.isTimerActive("test") should ===(false) fsm.startTimerWithFixedDelay("test", 12, 10 millis) fsm.isTimerActive("test") should ===(true) diff --git a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala index d0d8b9c341..93561189a0 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala @@ -77,7 +77,7 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { awaitAssert { child ! "hello" - restarts.get() should be > (1) + restarts.get() should be > 1 } } @@ -115,7 +115,7 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { } "have an AutoPilot" in { - //#autopilot + // #autopilot val probe = TestProbe() probe.setAutoPilot(new TestActor.AutoPilot { def run(sender: ActorRef, msg: Any): TestActor.AutoPilot = @@ -124,7 +124,7 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { case x => testActor.tell(x, sender); TestActor.KeepRunning } }) - //#autopilot + // #autopilot probe.ref ! "hallo" probe.ref ! "welt" probe.ref ! "stop" diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala index dbeb530861..809a3fba27 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala @@ -21,10 +21,11 @@ private[akka] class FileDescriptorMetricSet(os: OperatingSystemMXBean = Manageme override def getMetrics: util.Map[String, Metric] = { Map[String, Metric](name("file-descriptors", "open") -> new Gauge[Long] { - override def getValue: Long = invoke("getOpenFileDescriptorCount") - }, name("file-descriptors", "max") -> new Gauge[Long] { - override def getValue: Long = invoke("getMaxFileDescriptorCount") - }, name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava + override def getValue: Long = invoke("getOpenFileDescriptorCount") + }, + name("file-descriptors", "max") -> new Gauge[Long] { + override def getValue: Long = invoke("getMaxFileDescriptorCount") + }, name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava } private def invoke(name: String): Long = { diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala index 975e1b758f..f5f5bdf379 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala @@ -103,6 +103,6 @@ private[metrics] trait MetricsPrefix extends MetricSet { abstract override def getMetrics: util.Map[String, Metric] = { // does not have to be fast, is only called once during registering registry import akka.util.ccompat.JavaConverters._ - (super.getMetrics.asScala.map { case (k, v) => (prefix / k).toString -> v }).asJava + super.getMetrics.asScala.map { case (k, v) => (prefix / k).toString -> v }.asJava } } diff --git a/build.sbt b/build.sbt index debe780f4e..2d24e1b5ee 100644 --- a/build.sbt +++ 
b/build.sbt @@ -74,17 +74,17 @@ lazy val userProjects: Seq[ProjectReference] = List[ProjectReference]( testkit) lazy val aggregatedProjects: Seq[ProjectReference] = userProjects ++ List[ProjectReference]( - actorTests, - actorTypedTests, - benchJmh, - docs, - billOfMaterials, - persistenceShared, - persistenceTck, - persistenceTypedTests, - remoteTests, - streamTests, - streamTestsTck) + actorTests, + actorTypedTests, + benchJmh, + docs, + billOfMaterials, + persistenceShared, + persistenceTck, + persistenceTypedTests, + remoteTests, + streamTests, + streamTestsTck) lazy val root = Project(id = "akka", base = file(".")) .aggregate(aggregatedProjects: _*) @@ -92,13 +92,13 @@ lazy val root = Project(id = "akka", base = file(".")) .settings(rootSettings: _*) .settings( unidocRootIgnoreProjects := Seq( - remoteTests, - benchJmh, - protobuf, - protobufV3, - akkaScalaNightly, - docs, - serialversionRemoverPlugin)) + remoteTests, + benchJmh, + protobuf, + protobufV3, + akkaScalaNightly, + docs, + serialversionRemoverPlugin)) .settings(Compile / headerCreate / unmanagedSources := (baseDirectory.value / "project").**("*.scala").get) .settings(akka.AkkaBuild.welcomeSettings) .enablePlugins(CopyrightHeaderForBuild) @@ -311,7 +311,7 @@ lazy val persistenceTck = akkaModule("akka-persistence-tck") .dependsOn(persistence % "compile->compile;test->test", testkit % "compile->compile;test->test") .settings(Dependencies.persistenceTck) .settings(AutomaticModuleName.settings("akka.persistence.tck")) - //.settings(OSGi.persistenceTck) TODO: we do need to export this as OSGi bundle too? + // .settings(OSGi.persistenceTck) TODO: do we need to export this as an OSGi bundle too? .settings(Test / fork := true) .disablePlugins(MimaPlugin) @@ -353,21 +353,21 @@ lazy val protobufV3 = akkaModule("akka-protobuf-v3") .settings( libraryDependencies += Dependencies.Compile.Provided.protobufRuntime, assembly / assemblyShadeRules := Seq( - ShadeRule - .rename("com.google.protobuf.**" -> "akka.protobufv3.internal.@1") - // https://github.com/sbt/sbt-assembly/issues/400 - .inLibrary(Dependencies.Compile.Provided.protobufRuntime) - .inProject), + ShadeRule + .rename("com.google.protobuf.**" -> "akka.protobufv3.internal.@1") + // https://github.com/sbt/sbt-assembly/issues/400 + .inLibrary(Dependencies.Compile.Provided.protobufRuntime) + .inProject), assembly / assemblyOption := (assembly / assemblyOption).value.withIncludeScala(false).withIncludeBin(false), autoScalaLibrary := false, // do not include scala dependency in pom exportJars := true, // in dependent projects, use assembled and shaded jar makePomConfiguration := makePomConfiguration.value - .withConfigurations(Vector(Compile)), // prevent original dependency to be added to pom as runtime dep + .withConfigurations(Vector(Compile)), // prevent the original dependency from being added to the pom as a runtime dep Compile / packageBin / packagedArtifact := Scoped.mkTuple2( - (Compile / packageBin / artifact).value, - ReproducibleBuildsPlugin.postProcessJar(OsgiKeys.bundle.value)), + (Compile / packageBin / artifact).value, + ReproducibleBuildsPlugin.postProcessJar(OsgiKeys.bundle.value)), Compile / packageBin := ReproducibleBuildsPlugin - .postProcessJar((Compile / assembly).value), // package by running assembly + .postProcessJar((Compile / assembly).value), // package by running assembly // Prevent cyclic task dependencies, see https://github.com/sbt/sbt-assembly/issues/365 assembly / fullClasspath := (Runtime / managedClasspath).value, // otherwise, there's a cyclic dependency between
packageBin and assembly assembly / test := {}, // assembly runs tests for unknown reason which introduces another cyclic dependency to packageBin via exportedJars @@ -589,10 +589,10 @@ lazy val serialversionRemoverPlugin = lazy val serialversionRemoverPluginSettings = Seq( Compile / scalacOptions ++= ( - if (scalaVersion.value.startsWith("3.")) - Seq("-Xplugin:" + (serialversionRemoverPlugin / Compile / Keys.`package`).value.getAbsolutePath.toString) - else Nil - )) + if (scalaVersion.value.startsWith("3.")) + Seq("-Xplugin:" + (serialversionRemoverPlugin / Compile / Keys.`package`).value.getAbsolutePath.toString) + else Nil + )) def akkaModule(name: String): Project = Project(id = name, base = file(name)) @@ -620,7 +620,8 @@ addCommandAlias("allClusterSharding", commandValue(clusterSharding)) addCommandAlias("allClusterTools", commandValue(clusterTools)) addCommandAlias( "allCluster", - Seq(commandValue(cluster), commandValue(distributedData), commandValue(clusterSharding), commandValue(clusterTools)).mkString) + Seq(commandValue(cluster), commandValue(distributedData), commandValue(clusterSharding), + commandValue(clusterTools)).mkString) addCommandAlias("allCoordination", commandValue(coordination)) addCommandAlias("allDistributedData", commandValue(distributedData)) addCommandAlias("allPersistence", commandValue(persistence)) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index bae9050f9e..86b0f43570 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -37,8 +37,8 @@ object AkkaBuild { UnidocRoot.akkaSettings, Protobuf.settings, GlobalScope / parallelExecution := System - .getProperty("akka.parallelExecution", parallelExecutionByDefault.toString) - .toBoolean, + .getProperty("akka.parallelExecution", parallelExecutionByDefault.toString) + .toBoolean, // used for linking to API docs (overwrites `project-info.version`) ThisBuild / projectInfoVersion := { if (isSnapshot.value) "snapshot" else version.value }) @@ -68,15 +68,15 @@ object AkkaBuild { Seq( otherResolvers := resolver :: publishTo.value.toList, publishM2Configuration := Classpaths.publishConfig( - publishMavenStyle.value, - deliverPattern(crossTarget.value), - if (isSnapshot.value) "integration" else "release", - ivyConfigurations.value.map(c => ConfigRef(c.name)).toVector, - artifacts = packagedArtifacts.value.toVector, - resolverName = resolver.name, - checksums = (publishM2 / checksums).value.toVector, - logging = ivyLoggingLevel.value, - overwrite = true))) + publishMavenStyle.value, + deliverPattern(crossTarget.value), + if (isSnapshot.value) "integration" else "release", + ivyConfigurations.value.map(c => ConfigRef(c.name)).toVector, + artifacts = packagedArtifacts.value.toVector, + resolverName = resolver.name, + checksums = (publishM2 / checksums).value.toVector, + logging = ivyLoggingLevel.value, + overwrite = true))) } lazy val resolverSettings = Def.settings( @@ -140,13 +140,13 @@ object AkkaBuild { scalaVersion.value), Compile / scalacOptions ++= (if (allWarnings) Seq("-deprecation") else Nil), Test / scalacOptions := (Test / scalacOptions).value.filterNot(opt => - opt == "-Xlog-reflective-calls" || opt.contains("genjavadoc")), + opt == "-Xlog-reflective-calls" || opt.contains("genjavadoc")), Compile / javacOptions ++= { DefaultJavacOptions ++ JdkOptions.targetJdkJavacOptions(targetSystemJdk.value, optionalDir(jdk8home.value), fullJavaHomes.value) }, Test / javacOptions ++= DefaultJavacOptions ++ - JdkOptions.targetJdkJavacOptions(targetSystemJdk.value, optionalDir(jdk8home.value), 
fullJavaHomes.value), + JdkOptions.targetJdkJavacOptions(targetSystemJdk.value, optionalDir(jdk8home.value), fullJavaHomes.value), Compile / javacOptions ++= (if (allWarnings) Seq("-Xlint:deprecation") else Nil), doc / javacOptions := Seq(), crossVersion := CrossVersion.binary, @@ -165,10 +165,10 @@ object AkkaBuild { homepage := Some(url("https://akka.io/")), description := "Akka is a toolkit for building highly concurrent, distributed, and resilient message-driven applications for Java and Scala.", scmInfo := Some( - ScmInfo( - url("https://github.com/akka/akka"), - "scm:git:https://github.com/akka/akka.git", - "scm:git:git@github.com:akka/akka.git")), + ScmInfo( + url("https://github.com/akka/akka"), + "scm:git:https://github.com/akka/akka.git", + "scm:git:git@github.com:akka/akka.git")), apiURL := Some(url(s"https://doc.akka.io/api/akka/${version.value}")), initialCommands := """|import language.postfixOps @@ -244,8 +244,8 @@ object AkkaBuild { } }, Test / parallelExecution := System - .getProperty("akka.parallelExecution", parallelExecutionByDefault.toString) - .toBoolean, + .getProperty("akka.parallelExecution", parallelExecutionByDefault.toString) + .toBoolean, Test / logBuffered := System.getProperty("akka.logBufferedTests", "false").toBoolean, // show full stack traces and test case durations Test / testOptions += Tests.Argument("-oDF"), @@ -278,18 +278,18 @@ object AkkaBuild { }, logoColor := scala.Console.BLUE, usefulTasks := Seq( - UsefulTask("", "compile", "Compile the current project"), - UsefulTask("", "test", "Run all the tests "), - UsefulTask("", "testOnly *.AnySpec", "Only run a selected test"), - UsefulTask("", "verifyCodeStyle", "Verify code style"), - UsefulTask("", "applyCodeStyle", "Apply code style"), - UsefulTask("", "sortImports", "Sort the imports"), - UsefulTask("", "mimaReportBinaryIssues ", "Check binary issues"), - UsefulTask("", "validatePullRequest ", "Validate pull request"), - UsefulTask("", "akka-docs/paradox", "Build documentation"), - UsefulTask("", "akka-docs/paradoxBrowse", "Browse the generated documentation"), - UsefulTask("", "tips:", "prefix commands with `+` to run against cross Scala versions."), - UsefulTask("", "Contributing guide:", "https://github.com/akka/akka/blob/main/CONTRIBUTING.md"))) + UsefulTask("", "compile", "Compile the current project"), + UsefulTask("", "test", "Run all the tests "), + UsefulTask("", "testOnly *.AnySpec", "Only run a selected test"), + UsefulTask("", "verifyCodeStyle", "Verify code style"), + UsefulTask("", "applyCodeStyle", "Apply code style"), + UsefulTask("", "sortImports", "Sort the imports"), + UsefulTask("", "mimaReportBinaryIssues ", "Check binary issues"), + UsefulTask("", "validatePullRequest ", "Validate pull request"), + UsefulTask("", "akka-docs/paradox", "Build documentation"), + UsefulTask("", "akka-docs/paradoxBrowse", "Browse the generated documentation"), + UsefulTask("", "tips:", "prefix commands with `+` to run against cross Scala versions."), + UsefulTask("", "Contributing guide:", "https://github.com/akka/akka/blob/main/CONTRIBUTING.md"))) } private def optionalDir(path: String): Option[File] = @@ -330,7 +330,7 @@ object AkkaBuild { // * When subprojects need to be excluded, ++ needs to be specified for each command // // So the `++` equivalent of the above example is `sbt "++ 3.1.2 clean" "++ 3.1.2 compile"` - val switchVersion: Command = Command.args("+~", " ")({ (initialState: State, args: Seq[String]) => + val switchVersion: Command = Command.args("+~", " ") { (initialState: State, 
args: Seq[String]) => { val requestedVersionPrefix = args.head val requestedVersion = Dependencies.allScalaVersions.filter(_.startsWith(requestedVersionPrefix)).head @@ -347,5 +347,5 @@ object AkkaBuild { val commands = args.tail commands.foldLeft(initialState)(run) } - }) + } } diff --git a/project/AkkaDisciplinePlugin.scala b/project/AkkaDisciplinePlugin.scala index bde3e5fa0a..090c226d5b 100644 --- a/project/AkkaDisciplinePlugin.scala +++ b/project/AkkaDisciplinePlugin.scala @@ -71,13 +71,13 @@ object AkkaDisciplinePlugin extends AutoPlugin { lazy val nowarnSettings = Seq( Compile / scalacOptions ++= ( - if (scalaVersion.value.startsWith("3.")) Nil - else Seq(defaultScalaOptions) - ), + if (scalaVersion.value.startsWith("3.")) Nil + else Seq(defaultScalaOptions) + ), Test / scalacOptions ++= ( - if (scalaVersion.value.startsWith("3.")) Nil - else Seq(defaultScalaOptions) - ), + if (scalaVersion.value.startsWith("3.")) Nil + else Seq(defaultScalaOptions) + ), Compile / doc / scalacOptions := Seq()) /** @@ -87,15 +87,15 @@ object AkkaDisciplinePlugin extends AutoPlugin { Seq( Compile / scalacOptions -= defaultScalaOptions, Compile / scalacOptions ++= ( - if (scalaVersion.value.startsWith("3.")) Nil - else Seq("-Wconf:cat=unused:s,cat=deprecation:s,cat=unchecked:s,any:e") - ), + if (scalaVersion.value.startsWith("3.")) Nil + else Seq("-Wconf:cat=unused:s,cat=deprecation:s,cat=unchecked:s,any:e") + ), Test / scalacOptions --= Seq("-Xlint", "-unchecked", "-deprecation"), Test / scalacOptions -= defaultScalaOptions, Test / scalacOptions ++= ( - if (scalaVersion.value.startsWith("3.")) Nil - else Seq("-Wconf:cat=unused:s,cat=deprecation:s,cat=unchecked:s,any:e") - ), + if (scalaVersion.value.startsWith("3.")) Nil + else Seq("-Wconf:cat=unused:s,cat=deprecation:s,cat=unchecked:s,any:e") + ), Compile / doc / scalacOptions := Seq()) lazy val disciplineSettings = @@ -104,28 +104,28 @@ object AkkaDisciplinePlugin extends AutoPlugin { Compile / scalacOptions ++= Seq("-Xfatal-warnings"), Test / scalacOptions --= testUndiscipline, Compile / javacOptions ++= ( - if (scalaVersion.value.startsWith("3.")) { - Seq() - } else { - if (!nonFatalJavaWarningsFor(name.value)) Seq("-Werror", "-Xlint:deprecation", "-Xlint:unchecked") - else Seq.empty - } - ), + if (scalaVersion.value.startsWith("3.")) { + Seq() + } else { + if (!nonFatalJavaWarningsFor(name.value)) Seq("-Werror", "-Xlint:deprecation", "-Xlint:unchecked") + else Seq.empty + } + ), Compile / doc / javacOptions := Seq("-Xdoclint:none"), Compile / scalacOptions ++= (CrossVersion.partialVersion(scalaVersion.value) match { - case Some((2, 13)) => - disciplineScalacOptions -- Set( - "-Ywarn-inaccessible", - "-Ywarn-infer-any", - "-Ywarn-nullary-override", - "-Ywarn-nullary-unit", - "-Ypartial-unification", - "-Yno-adapted-args") - case Some((2, 12)) => - disciplineScalacOptions - case _ => - Nil - }).toSeq, + case Some((2, 13)) => + disciplineScalacOptions -- Set( + "-Ywarn-inaccessible", + "-Ywarn-infer-any", + "-Ywarn-nullary-override", + "-Ywarn-nullary-unit", + "-Ypartial-unification", + "-Yno-adapted-args") + case Some((2, 12)) => + disciplineScalacOptions + case _ => + Nil + }).toSeq, Compile / scalacOptions --= (if (looseProjects.contains(name.value)) undisciplineScalacOptions.toSeq else Seq.empty), @@ -150,7 +150,7 @@ object AkkaDisciplinePlugin extends AutoPlugin { */ val undisciplineScalacOptions = Set("-Ywarn-numeric-widen") - /** These options are desired, but some are excluded for the time being*/ + /** These options are desired, but some 
are excluded for the time being */ val disciplineScalacOptions = Set( "-Ywarn-numeric-widen", "-Yno-adapted-args", diff --git a/project/CopyrightHeader.scala b/project/CopyrightHeader.scala index f3398e8f5e..cabf6f9afe 100644 --- a/project/CopyrightHeader.scala +++ b/project/CopyrightHeader.scala @@ -23,21 +23,22 @@ trait CopyrightHeader extends AutoPlugin { Seq( headerLicense := Some(HeaderLicense.Custom(headerFor(CurrentYear))), headerMappings := headerMappings.value ++ Map( - HeaderFileType.scala -> cStyleComment, - HeaderFileType.java -> cStyleComment, - HeaderFileType("template") -> cStyleComment))) + HeaderFileType.scala -> cStyleComment, + HeaderFileType.java -> cStyleComment, + HeaderFileType("template") -> cStyleComment))) } override def projectSettings: Seq[Def.Setting[_]] = Def.settings(headerMappingSettings, additional) def additional: Seq[Def.Setting[_]] = Def.settings(Compile / compile := { - (Compile / headerCreate).value - (Compile / compile).value - }, Test / compile := { - (Test / headerCreate).value - (Test / compile).value - }) + (Compile / headerCreate).value + (Compile / compile).value + }, + Test / compile := { + (Test / headerCreate).value + (Test / compile).value + }) // We hard-code this so PRs created in year X will not suddenly fail in X+1. // Of course we should remember to update it early in the year. diff --git a/project/CopyrightHeaderForBoilerplate.scala b/project/CopyrightHeaderForBoilerplate.scala index 1897721582..98e7b07058 100644 --- a/project/CopyrightHeaderForBoilerplate.scala +++ b/project/CopyrightHeaderForBoilerplate.scala @@ -7,7 +7,7 @@ package akka import de.heikoseeberger.sbtheader.HeaderPlugin import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport._ import sbt.Keys.sourceDirectory -import sbt.{ Compile, Def, Plugins, Test, inConfig, _ } +import sbt.{ inConfig, Compile, Def, Plugins, Test, _ } import spray.boilerplate.BoilerplatePlugin object CopyrightHeaderForBoilerplate extends CopyrightHeader { diff --git a/project/CopyrightHeaderForBuild.scala b/project/CopyrightHeaderForBuild.scala index 31384bc0ce..a676fde398 100644 --- a/project/CopyrightHeaderForBuild.scala +++ b/project/CopyrightHeaderForBuild.scala @@ -6,7 +6,7 @@ package akka import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport.{ headerMappings, headerSources, HeaderFileType } import sbt.Keys.baseDirectory -import sbt.{ Compile, Def, PluginTrigger, Test, inConfig, _ } +import sbt.{ inConfig, Compile, Def, PluginTrigger, Test, _ } object CopyrightHeaderForBuild extends CopyrightHeader { override def trigger: PluginTrigger = noTrigger diff --git a/project/CopyrightHeaderForProtobuf.scala b/project/CopyrightHeaderForProtobuf.scala index 6ff62258e4..37330545d0 100644 --- a/project/CopyrightHeaderForProtobuf.scala +++ b/project/CopyrightHeaderForProtobuf.scala @@ -6,7 +6,7 @@ package akka import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport.{ headerMappings, headerSources, HeaderFileType } import sbt.Keys.sourceDirectory -import sbt.{ Compile, Def, Test, inConfig, _ } +import sbt.{ inConfig, Compile, Def, Test, _ } object CopyrightHeaderForProtobuf extends CopyrightHeader { override protected def headerMappingSettings: Seq[Def.Setting[_]] = { diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 29b7b635bc..e3a2c9ad1c 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -75,15 +75,16 @@ object Dependencies { val scalaCheckVersion = "1.15.1" val Versions = Seq(crossScalaVersions := allScalaVersions, scalaVersion :=
allScalaVersions.head, java8CompatVersion := { - CrossVersion.partialVersion(scalaVersion.value) match { - // java8-compat is only used in a couple of places for 2.13, - // it is probably possible to remove the dependency if needed. - case Some((3, _)) => "1.0.0" - case Some((2, n)) if n >= 13 => "1.0.0" - case _ => "0.8.0" - } - }) + Seq(crossScalaVersions := allScalaVersions, scalaVersion := allScalaVersions.head, + java8CompatVersion := { + CrossVersion.partialVersion(scalaVersion.value) match { + // java8-compat is only used in a couple of places for 2.13, + // it is probably possible to remove the dependency if needed. + case Some((3, _)) => "1.0.0" + case Some((2, n)) if n >= 13 => "1.0.0" + case _ => "0.8.0" + } + }) object Compile { // Compile @@ -117,7 +118,7 @@ object Dependencies { // For Java 8 Conversions val java8Compat = Def.setting { - ("org.scala-lang.modules" %% "scala-java8-compat" % java8CompatVersion.value) + "org.scala-lang.modules" %% "scala-java8-compat" % java8CompatVersion.value } // Scala License val aeronDriver = "io.aeron" % "aeron-driver" % aeronVersion // ApacheV2 @@ -252,39 +253,39 @@ object Dependencies { val testkit = l ++= Seq(TestDependencies.junit, TestDependencies.scalatest.value) ++ TestDependencies.metricsAll val actorTests = l ++= Seq( - TestDependencies.junit, - TestDependencies.scalatest.value, - TestDependencies.scalatestJUnit.value, - TestDependencies.scalatestScalaCheck.value, - TestDependencies.commonsCodec, - TestDependencies.commonsMath, - TestDependencies.jimfs, - TestDependencies.dockerClient, - Provided.activation // dockerClient needs javax.activation.DataSource in JDK 11+ - ) + TestDependencies.junit, + TestDependencies.scalatest.value, + TestDependencies.scalatestJUnit.value, + TestDependencies.scalatestScalaCheck.value, + TestDependencies.commonsCodec, + TestDependencies.commonsMath, + TestDependencies.jimfs, + TestDependencies.dockerClient, + Provided.activation // dockerClient needs javax.activation.DataSource in JDK 11+ + ) val actorTestkitTyped = l ++= Seq( - Provided.logback, - Provided.junit, - Provided.scalatest.value, - TestDependencies.scalatestJUnit.value) + Provided.logback, + Provided.junit, + Provided.scalatest.value, + TestDependencies.scalatestJUnit.value) val pki = l ++= - Seq( - asnOne, - // pull up slf4j version from the one provided transitively in asnOne to fix unidoc - Compile.slf4jApi, - TestDependencies.scalatest.value) + Seq( + asnOne, + // pull up slf4j version from the one provided transitively in asnOne to fix unidoc + Compile.slf4jApi, + TestDependencies.scalatest.value) val remoteDependencies = Seq(netty, aeronDriver, aeronClient) val remoteOptionalDependencies = remoteDependencies.map(_ % "optional") val remote = l ++= Seq( - agrona, - TestDependencies.junit, - TestDependencies.scalatest.value, - TestDependencies.jimfs, - TestDependencies.protobufRuntime) ++ remoteOptionalDependencies + agrona, + TestDependencies.junit, + TestDependencies.scalatest.value, + TestDependencies.jimfs, + TestDependencies.protobufRuntime) ++ remoteOptionalDependencies val remoteTests = l ++= Seq(TestDependencies.junit, TestDependencies.scalatest.value) ++ remoteDependencies @@ -295,45 +296,45 @@ object Dependencies { val clusterTools = l ++= Seq(TestDependencies.junit, TestDependencies.scalatest.value) val clusterSharding = l ++= Seq( - Provided.levelDBmultiJVM, - Provided.levelDBNative, - TestDependencies.junit, - TestDependencies.scalatest.value, - TestDependencies.commonsIo, - TestDependencies.ycsb) + 
Provided.levelDBmultiJVM, + Provided.levelDBNative, + TestDependencies.junit, + TestDependencies.scalatest.value, + TestDependencies.commonsIo, + TestDependencies.ycsb) val clusterMetrics = l ++= Seq( - Provided.sigarLoader, - TestDependencies.slf4jJul, - TestDependencies.slf4jLog4j, - TestDependencies.logback, - TestDependencies.scalatestMockito.value) + Provided.sigarLoader, + TestDependencies.slf4jJul, + TestDependencies.slf4jLog4j, + TestDependencies.logback, + TestDependencies.scalatestMockito.value) val distributedData = l ++= Seq(lmdb, TestDependencies.junit, TestDependencies.scalatest.value) val slf4j = l ++= Seq(slf4jApi, TestDependencies.logback) val persistence = l ++= Seq( - Provided.levelDB, - Provided.levelDBNative, - TestDependencies.scalatest.value, - TestDependencies.scalatestJUnit.value, - TestDependencies.junit, - TestDependencies.commonsIo, - TestDependencies.commonsCodec) + Provided.levelDB, + Provided.levelDBNative, + TestDependencies.scalatest.value, + TestDependencies.scalatestJUnit.value, + TestDependencies.junit, + TestDependencies.commonsIo, + TestDependencies.commonsCodec) val persistenceQuery = l ++= Seq( - TestDependencies.scalatest.value, - TestDependencies.junit, - TestDependencies.commonsIo, - Provided.levelDB, - Provided.levelDBNative) + TestDependencies.scalatest.value, + TestDependencies.junit, + TestDependencies.commonsIo, + Provided.levelDB, + Provided.levelDBNative) val persistenceTck = l ++= Seq( - TestDependencies.scalatest.value.withConfigurations(Some("compile")), - TestDependencies.junit.withConfigurations(Some("compile")), - Provided.levelDB, - Provided.levelDBNative) + TestDependencies.scalatest.value.withConfigurations(Some("compile")), + TestDependencies.junit.withConfigurations(Some("compile")), + Provided.levelDB, + Provided.levelDBNative) val persistenceTestKit = l ++= Seq(TestDependencies.scalatest.value, TestDependencies.logback) @@ -342,33 +343,33 @@ object Dependencies { val persistenceShared = l ++= Seq(Provided.levelDB, Provided.levelDBNative, TestDependencies.logback) val jackson = l ++= Seq( - jacksonCore.value, - jacksonAnnotations.value, - jacksonDatabind.value, - jacksonJdk8.value, - jacksonJsr310.value, - jacksonParameterNames.value, - jacksonCbor.value, - jacksonScala.value, - lz4Java, - TestDependencies.junit, - TestDependencies.scalatest.value) + jacksonCore.value, + jacksonAnnotations.value, + jacksonDatabind.value, + jacksonJdk8.value, + jacksonJsr310.value, + jacksonParameterNames.value, + jacksonCbor.value, + jacksonScala.value, + lz4Java, + TestDependencies.junit, + TestDependencies.scalatest.value) val osgi = l ++= Seq( - osgiCore, - osgiCompendium, - TestDependencies.logback, - TestDependencies.commonsIo, - TestDependencies.pojosr, - TestDependencies.tinybundles, - TestDependencies.scalatest.value, - TestDependencies.junit) + osgiCore, + osgiCompendium, + TestDependencies.logback, + TestDependencies.commonsIo, + TestDependencies.pojosr, + TestDependencies.tinybundles, + TestDependencies.scalatest.value, + TestDependencies.junit) val docs = l ++= Seq( - TestDependencies.scalatest.value, - TestDependencies.junit, - Docs.sprayJson, - Docs.gson, - Provided.levelDB) + TestDependencies.scalatest.value, + TestDependencies.junit, + Docs.sprayJson, + Docs.gson, + Provided.levelDB) val benchJmh = l ++= Seq(logback, Provided.levelDB, Provided.levelDBNative, Compile.jctools) @@ -377,23 +378,23 @@ object Dependencies { lazy val stream = l ++= Seq[sbt.ModuleID](reactiveStreams, sslConfigCore.value, TestDependencies.scalatest.value) 
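The `l ++= Seq(...)` values reindented above and below all follow one pattern. As a minimal sketch of how such a value plugs into the build, assuming (as these hunks do not show) that `l` is an alias for sbt's `libraryDependencies` key, and with purely illustrative module coordinates:

    import sbt._
    import sbt.Keys._

    object DependenciesSketch {
      // assumption: `l` abbreviates libraryDependencies in Dependencies.scala
      val l = libraryDependencies

      object TestDependencies {
        // illustrative coordinates only, not taken from this patch
        val junit = "junit" % "junit" % "4.13.2" % "test"
      }

      // each `val foo = l ++= Seq(...)` is a single sbt Setting that a module
      // then mixes in, e.g. akkaModule("akka-foo").settings(DependenciesSketch.exampleModule)
      val exampleModule: Def.Setting[Seq[ModuleID]] = l ++= Seq(TestDependencies.junit)
    }
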
lazy val streamTestkit = l ++= Seq( - TestDependencies.scalatest.value, - TestDependencies.scalatestScalaCheck.value, - TestDependencies.junit) + TestDependencies.scalatest.value, + TestDependencies.scalatestScalaCheck.value, + TestDependencies.junit) lazy val streamTests = l ++= Seq( - TestDependencies.scalatest.value, - TestDependencies.scalatestScalaCheck.value, - TestDependencies.junit, - TestDependencies.commonsIo, - TestDependencies.jimfs) + TestDependencies.scalatest.value, + TestDependencies.scalatestScalaCheck.value, + TestDependencies.junit, + TestDependencies.commonsIo, + TestDependencies.jimfs) lazy val streamTestsTck = l ++= Seq( - TestDependencies.scalatest.value, - TestDependencies.scalatestTestNG.value, - TestDependencies.scalatestScalaCheck.value, - TestDependencies.junit, - TestDependencies.reactiveStreamsTck) + TestDependencies.scalatest.value, + TestDependencies.scalatestTestNG.value, + TestDependencies.scalatestScalaCheck.value, + TestDependencies.junit, + TestDependencies.reactiveStreamsTck) } diff --git a/project/Doc.scala b/project/Doc.scala index 5aa3ae8748..677be3547b 100644 --- a/project/Doc.scala +++ b/project/Doc.scala @@ -39,9 +39,9 @@ object Scaladoc extends AutoPlugin { // Publishing scala3 docs is broken (https://github.com/akka/akka/issues/30788), // for now we just skip it: Compile / doc / sources := ( - if (scalaVersion.value.startsWith("3.")) Seq() - else (Compile / doc / sources).value - ), + if (scalaVersion.value.startsWith("3.")) Seq() + else (Compile / doc / sources).value + ), Compile / validateDiagrams := true) ++ CliOptions.scaladocDiagramsEnabled.ifTrue(Compile / doc := { val docs = (Compile / doc).value @@ -80,19 +80,19 @@ object Scaladoc extends AutoPlugin { val hasDiagram = files.exists { f => val name = f.getName if (name.endsWith(".html") && !name.startsWith("index-") && - !name.equals("index.html") && !name.equals("package.html")) { + !name.equals("index.html") && !name.equals("package.html")) { val source = scala.io.Source.fromFile(f)(scala.io.Codec.UTF8) - val hd = try source - .getLines() - .exists( - lines => - lines.contains( - "
    ") || - lines.contains(" - throw new IllegalStateException("Scaladoc verification failed for file '" + f + "'", e) - } finally source.close() + val hd = + try source + .getLines() + .exists(lines => + lines.contains( + "
    ") || + lines.contains(" + throw new IllegalStateException("Scaladoc verification failed for file '" + f + "'", e) + } finally source.close() hd } else false } @@ -149,7 +149,7 @@ object UnidocRoot extends AutoPlugin { val releaseVersion = if (isSnapshot.value) "snapshot" else version.value (Compile / unidoc).value match { case Seq(japi, api) => - Seq((japi -> s"www/japi/akka/$releaseVersion"), (api -> s"www/api/akka/$releaseVersion")) + Seq(japi -> s"www/japi/akka/$releaseVersion", api -> s"www/api/akka/$releaseVersion") } })) .getOrElse(Nil) @@ -166,20 +166,19 @@ object UnidocRoot extends AutoPlugin { UnidocRoot.CliOptions.genjavadocEnabled .ifTrue(Seq(JavaUnidoc / unidocAllSources ~= { v => v.map( - _.filterNot( - s => - // akka.stream.scaladsl.GraphDSL.Implicits.ReversePortsOps - // contains code that genjavadoc turns into (probably - // incorrect) Java code that in turn confuses the javadoc - // tool. - s.getAbsolutePath.endsWith("scaladsl/GraphDSL.java") || - // Since adding -P:genjavadoc:strictVisibility=true, - // the javadoc tool would NullPointerException while - // determining the upper bound for some generics: - s.getAbsolutePath.endsWith("TopicImpl.java") || - s.getAbsolutePath.endsWith("PersistencePlugin.java") || - s.getAbsolutePath.endsWith("GraphDelegate.java") || - s.getAbsolutePath.contains("/impl/"))) + _.filterNot(s => + // akka.stream.scaladsl.GraphDSL.Implicits.ReversePortsOps + // contains code that genjavadoc turns into (probably + // incorrect) Java code that in turn confuses the javadoc + // tool. + s.getAbsolutePath.endsWith("scaladsl/GraphDSL.java") || + // Since adding -P:genjavadoc:strictVisibility=true, + // the javadoc tool would NullPointerException while + // determining the upper bound for some generics: + s.getAbsolutePath.endsWith("TopicImpl.java") || + s.getAbsolutePath.endsWith("PersistencePlugin.java") || + s.getAbsolutePath.endsWith("GraphDelegate.java") || + s.getAbsolutePath.contains("/impl/"))) })) .getOrElse(Nil)) } @@ -204,8 +203,8 @@ object BootstrapGenjavadoc extends AutoPlugin { .ifTrue(Seq( unidocGenjavadocVersion := "0.18", Compile / scalacOptions ++= Seq( - "-P:genjavadoc:fabricateParams=false", - "-P:genjavadoc:suppressSynthetic=false", - "-P:genjavadoc:strictVisibility=true"))) + "-P:genjavadoc:fabricateParams=false", + "-P:genjavadoc:suppressSynthetic=false", + "-P:genjavadoc:strictVisibility=true"))) .getOrElse(Nil) } diff --git a/project/JavaFormatter.scala b/project/JavaFormatter.scala index 34848f0e3e..55fe2dc8d1 100644 --- a/project/JavaFormatter.scala +++ b/project/JavaFormatter.scala @@ -24,7 +24,7 @@ object JavaFormatter extends AutoPlugin { override def projectSettings: Seq[Def.Setting[_]] = Seq( - //below is for sbt java formatter + // below is for sbt java formatter javafmt / excludeFilter := { val ignoreSupport = new ProjectFileIgnoreSupport((ThisBuild / baseDirectory).value / ignoreConfigFileName, descriptor) diff --git a/project/Jdk9.scala b/project/Jdk9.scala index 459bf2b2cb..278f82aa6a 100644 --- a/project/Jdk9.scala +++ b/project/Jdk9.scala @@ -22,18 +22,18 @@ object Jdk9 extends AutoPlugin { val compileJdk9Settings = Seq( // following the scala-2.12, scala-sbt-1.0, ... 
convention unmanagedSourceDirectories := notOnJdk8( - Seq( - (Compile / sourceDirectory).value / SCALA_SOURCE_DIRECTORY, - (Compile / sourceDirectory).value / JAVA_SOURCE_DIRECTORY)), + Seq( + (Compile / sourceDirectory).value / SCALA_SOURCE_DIRECTORY, + (Compile / sourceDirectory).value / JAVA_SOURCE_DIRECTORY)), scalacOptions := AkkaBuild.DefaultScalacOptions.value ++ notOnJdk8(Seq("-release", "11")), javacOptions := AkkaBuild.DefaultJavacOptions ++ notOnJdk8(Seq("--release", "11"))) val testJdk9Settings = Seq( // following the scala-2.12, scala-sbt-1.0, ... convention unmanagedSourceDirectories := notOnJdk8( - Seq( - (Test / sourceDirectory).value / SCALA_TEST_SOURCE_DIRECTORY, - (Test / sourceDirectory).value / JAVA_TEST_SOURCE_DIRECTORY)), + Seq( + (Test / sourceDirectory).value / SCALA_TEST_SOURCE_DIRECTORY, + (Test / sourceDirectory).value / JAVA_TEST_SOURCE_DIRECTORY)), scalacOptions := AkkaBuild.DefaultScalacOptions.value ++ notOnJdk8(Seq("-release", "11")), javacOptions := AkkaBuild.DefaultJavacOptions ++ notOnJdk8(Seq("--release", "11")), compile := compile.dependsOn(CompileJdk9 / compile).value, diff --git a/project/JdkOptions.scala b/project/JdkOptions.scala index 787c89805e..0f5814929a 100644 --- a/project/JdkOptions.scala +++ b/project/JdkOptions.scala @@ -53,7 +53,7 @@ object JdkOptions extends AutoPlugin { // explicitly. To test whether this has the desired effect, compile // akka-remote and check the invocation of 'ByteBuffer.clear()' in // EnvelopeBuffer.class with 'javap -c': it should refer to - //""java/nio/ByteBuffer.clear:()Ljava/nio/Buffer" and not + // "java/nio/ByteBuffer.clear:()Ljava/nio/Buffer" and not // "java/nio/ByteBuffer.clear:()Ljava/nio/ByteBuffer". Issue #27079 (java8home: File) => Seq("-release", "8", "-javabootclasspath", java8home + "/jre/lib/rt.jar")) def targetJdkJavacOptions( diff --git a/project/Jvm.scala b/project/Jvm.scala index 5191558be1..74399da37b 100644 --- a/project/Jvm.scala +++ b/project/Jvm.scala @@ -29,7 +29,7 @@ object Jvm { /** * check if the current operating system is the given OS - **/ + */ def isOS(os: String) = try { System.getProperty("os.name").toUpperCase.startsWith(os.toUpperCase) @@ -39,7 +39,7 @@ object Jvm { /** * convert to the proper path format for the operating system - **/ + */ def osPath(path: String) = if (isOS("WINDOWS")) Process(Seq("cygpath", path)).lineStream.mkString else path def getPodName(hostAndUser: String, sbtLogger: Logger): String = { diff --git a/project/MiMa.scala b/project/MiMa.scala index d4c2ed9c8b..4b4d5ab70a 100644 --- a/project/MiMa.scala +++ b/project/MiMa.scala @@ -27,10 +27,11 @@ object MiMa extends AutoPlugin { checkMimaFilterDirectories := checkFilterDirectories(baseDirectory.value)) def checkFilterDirectories(moduleRoot: File): Unit = { - val nextVersionFilterDir = moduleRoot / "src" / "main" / "mima-filters" / s"2.6.${latestPatchOf26 + 1}.backwards.excludes" + val nextVersionFilterDir = + moduleRoot / "src" / "main" / "mima-filters" / s"2.6.${latestPatchOf26 + 1}.backwards.excludes" if (nextVersionFilterDir.exists()) { throw new IllegalArgumentException(s"Incorrect mima filter directory exists: '${nextVersionFilterDir}' " + - s"should be with number from current release '${moduleRoot / "src" / "main" / "mima-filters" / s"2.6.${latestPatchOf26}.backwards.excludes"}") + s"should use the number of the current release: '${moduleRoot / "src" / "main" / "mima-filters" / s"2.6.${latestPatchOf26}.backwards.excludes"}'") } } diff --git a/project/OSGi.scala b/project/OSGi.scala index 81711445d2..bdd87c46df 100644
--- a/project/OSGi.scala +++ b/project/OSGi.scala @@ -36,17 +36,17 @@ object OSGi { OsgiKeys.requireCapability := "osgi.ee;filter:=\"(&(osgi.ee=JavaSE)(version>=1.8))\"") val actor = osgiSettings ++ Seq( - OsgiKeys.exportPackage := Seq("akka*"), - OsgiKeys.privatePackage := Seq("akka.osgi.impl"), - //akka-actor packages are not imported, as contained in the CP - OsgiKeys.importPackage := (osgiOptionalImports.map(optionalResolution)) ++ Seq( - "!sun.misc", - scalaJava8CompatImport(), - scalaVersion(scalaImport).value, - configImport(), - "*"), - // dynamicImportPackage needed for loading classes defined in configuration - OsgiKeys.dynamicImportPackage := Seq("*")) + OsgiKeys.exportPackage := Seq("akka*"), + OsgiKeys.privatePackage := Seq("akka.osgi.impl"), + // akka-actor packages are not imported, as contained in the CP + OsgiKeys.importPackage := (osgiOptionalImports.map(optionalResolution)) ++ Seq( + "!sun.misc", + scalaJava8CompatImport(), + scalaVersion(scalaImport).value, + configImport(), + "*"), + // dynamicImportPackage needed for loading classes defined in configuration + OsgiKeys.dynamicImportPackage := Seq("*")) val actorTyped = exports(Seq("akka.actor.typed.*")) @@ -65,14 +65,14 @@ object OSGi { val protobuf = exports(Seq("akka.protobuf.*")) val protobufV3 = osgiSettings ++ Seq( - OsgiKeys.importPackage := Seq( - "!sun.misc", - scalaJava8CompatImport(), - scalaVersion(scalaImport).value, - configImport(), - "*"), - OsgiKeys.exportPackage := Seq("akka.protobufv3.internal.*"), - OsgiKeys.privatePackage := Seq("google.protobuf.*")) + OsgiKeys.importPackage := Seq( + "!sun.misc", + scalaJava8CompatImport(), + scalaVersion(scalaImport).value, + configImport(), + "*"), + OsgiKeys.exportPackage := Seq("akka.protobufv3.internal.*"), + OsgiKeys.privatePackage := Seq("google.protobuf.*")) val jackson = exports(Seq("akka.serialization.jackson.*")) diff --git a/project/Paradox.scala b/project/Paradox.scala index f58a39afa9..662871c07f 100644 --- a/project/Paradox.scala +++ b/project/Paradox.scala @@ -17,67 +17,67 @@ object Paradox { val propertiesSettings = Seq( Compile / paradoxProperties ++= Map( - "canonical.base_url" -> "https://doc.akka.io/docs/akka/current", - "github.base_url" -> GitHub - .url(version.value), // for links like this: @github[#1](#1) or @github[83986f9](83986f9) - "extref.akka.http.base_url" -> "https://doc.akka.io/docs/akka-http/current/%s", - "extref.akka-management.base_url" -> "https://doc.akka.io/docs/akka-management/current/%s", - "extref.platform-guide.base_url" -> "https://developer.lightbend.com/docs/akka-platform-guide/%s", - "extref.wikipedia.base_url" -> "https://en.wikipedia.org/wiki/%s", - "extref.github.base_url" -> (GitHub.url(version.value) + "/%s"), // for links to our sources - "extref.samples.base_url" -> "https://developer.lightbend.com/start/?group=akka&project=%s", - "extref.ecs.base_url" -> "https://example.lightbend.com/v1/download/%s", - "scaladoc.akka.base_url" -> "https://doc.akka.io/api/akka/2.6", - "scaladoc.akka.http.base_url" -> "https://doc.akka.io/api/akka-http/current", - "javadoc.java.base_url" -> "https://docs.oracle.com/en/java/javase/11/docs/api/java.base/", - "javadoc.java.link_style" -> "direct", - "javadoc.akka.base_url" -> "https://doc.akka.io/japi/akka/2.6", - "javadoc.akka.link_style" -> "direct", - "javadoc.akka.http.base_url" -> "https://doc.akka.io/japi/akka-http/current", - "javadoc.akka.http.link_style" -> "frames", - "javadoc.com.fasterxml.jackson.annotation.base_url" -> 
"https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-annotations/latest/", - "javadoc.com.fasterxml.jackson.annotation.link_style" -> "direct", - "javadoc.com.fasterxml.jackson.databind.base_url" -> "https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-databind/latest/", - "javadoc.com.fasterxml.jackson.databind.link_style" -> "direct", - "javadoc.com.google.protobuf.base_url" -> "https://javadoc.io/doc/com.google.protobuf/protobuf-java/latest/", - "javadoc.com.google.protobuf.link_style" -> "direct", - "javadoc.com.typesafe.config.base_url" -> "https://javadoc.io/doc/com.typesafe/config/latest/", - "javadoc.com.typesafe.config.link_style" -> "direct", - "javadoc.org.slf4j.base_url" -> "https://www.javadoc.io/doc/org.slf4j/slf4j-api/latest/org.slf4j", - "javadoc.org.slf4j.link_style" -> "direct", - "scala.version" -> scalaVersion.value, - "scala.binary.version" -> scalaBinaryVersion.value, - "akka.version" -> version.value, - "scalatest.version" -> Dependencies.scalaTestVersion.value, - "sigar_loader.version" -> "1.6.6-rev002", - "algolia.docsearch.api_key" -> "543bad5ad786495d9ccd445ed34ed082", - "algolia.docsearch.index_name" -> "akka_io", - "google.analytics.account" -> "UA-21117439-1", - "google.analytics.domain.name" -> "akka.io", - "signature.akka.base_dir" -> (ThisBuild / baseDirectory).value.getAbsolutePath, - "fiddle.code.base_dir" -> (Test / sourceDirectory).value.getAbsolutePath, - "fiddle.akka.base_dir" -> (ThisBuild / baseDirectory).value.getAbsolutePath, - "aeron_version" -> Dependencies.aeronVersion, - "netty_version" -> Dependencies.nettyVersion, - "logback_version" -> Dependencies.logbackVersion)) + "canonical.base_url" -> "https://doc.akka.io/docs/akka/current", + "github.base_url" -> GitHub + .url(version.value), // for links like this: @github[#1](#1) or @github[83986f9](83986f9) + "extref.akka.http.base_url" -> "https://doc.akka.io/docs/akka-http/current/%s", + "extref.akka-management.base_url" -> "https://doc.akka.io/docs/akka-management/current/%s", + "extref.platform-guide.base_url" -> "https://developer.lightbend.com/docs/akka-platform-guide/%s", + "extref.wikipedia.base_url" -> "https://en.wikipedia.org/wiki/%s", + "extref.github.base_url" -> (GitHub.url(version.value) + "/%s"), // for links to our sources + "extref.samples.base_url" -> "https://developer.lightbend.com/start/?group=akka&project=%s", + "extref.ecs.base_url" -> "https://example.lightbend.com/v1/download/%s", + "scaladoc.akka.base_url" -> "https://doc.akka.io/api/akka/2.6", + "scaladoc.akka.http.base_url" -> "https://doc.akka.io/api/akka-http/current", + "javadoc.java.base_url" -> "https://docs.oracle.com/en/java/javase/11/docs/api/java.base/", + "javadoc.java.link_style" -> "direct", + "javadoc.akka.base_url" -> "https://doc.akka.io/japi/akka/2.6", + "javadoc.akka.link_style" -> "direct", + "javadoc.akka.http.base_url" -> "https://doc.akka.io/japi/akka-http/current", + "javadoc.akka.http.link_style" -> "frames", + "javadoc.com.fasterxml.jackson.annotation.base_url" -> "https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-annotations/latest/", + "javadoc.com.fasterxml.jackson.annotation.link_style" -> "direct", + "javadoc.com.fasterxml.jackson.databind.base_url" -> "https://javadoc.io/doc/com.fasterxml.jackson.core/jackson-databind/latest/", + "javadoc.com.fasterxml.jackson.databind.link_style" -> "direct", + "javadoc.com.google.protobuf.base_url" -> "https://javadoc.io/doc/com.google.protobuf/protobuf-java/latest/", + "javadoc.com.google.protobuf.link_style" -> "direct", + 
"javadoc.com.typesafe.config.base_url" -> "https://javadoc.io/doc/com.typesafe/config/latest/", + "javadoc.com.typesafe.config.link_style" -> "direct", + "javadoc.org.slf4j.base_url" -> "https://www.javadoc.io/doc/org.slf4j/slf4j-api/latest/org.slf4j", + "javadoc.org.slf4j.link_style" -> "direct", + "scala.version" -> scalaVersion.value, + "scala.binary.version" -> scalaBinaryVersion.value, + "akka.version" -> version.value, + "scalatest.version" -> Dependencies.scalaTestVersion.value, + "sigar_loader.version" -> "1.6.6-rev002", + "algolia.docsearch.api_key" -> "543bad5ad786495d9ccd445ed34ed082", + "algolia.docsearch.index_name" -> "akka_io", + "google.analytics.account" -> "UA-21117439-1", + "google.analytics.domain.name" -> "akka.io", + "signature.akka.base_dir" -> (ThisBuild / baseDirectory).value.getAbsolutePath, + "fiddle.code.base_dir" -> (Test / sourceDirectory).value.getAbsolutePath, + "fiddle.akka.base_dir" -> (ThisBuild / baseDirectory).value.getAbsolutePath, + "aeron_version" -> Dependencies.aeronVersion, + "netty_version" -> Dependencies.nettyVersion, + "logback_version" -> Dependencies.logbackVersion)) val rootsSettings = Seq( paradoxRoots := List( - "index.html", - // Page that recommends Alpakka: - "camel.html", - // TODO page not linked to - "fault-tolerance-sample.html")) + "index.html", + // Page that recommends Alpakka: + "camel.html", + // TODO page not linked to + "fault-tolerance-sample.html")) // FIXME https://github.com/lightbend/paradox/issues/350 // Exclusions from direct compilation for includes dirs/files not belonging in a TOC val includesSettings = Seq( (Compile / paradoxMarkdownToHtml / excludeFilter) := (Compile / paradoxMarkdownToHtml / excludeFilter).value || - ParadoxPlugin.InDirectoryFilter((Compile / paradox / sourceDirectory).value / "includes"), + ParadoxPlugin.InDirectoryFilter((Compile / paradox / sourceDirectory).value / "includes"), // Links are interpreted relative to the page the snippet is included in, // instead of relative to the place where the snippet is declared. 
(Compile / paradoxMarkdownToHtml / excludeFilter) := (Compile / paradoxMarkdownToHtml / excludeFilter).value || - ParadoxPlugin.InDirectoryFilter((Compile / paradox / sourceDirectory).value / "includes.html")) + ParadoxPlugin.InDirectoryFilter((Compile / paradox / sourceDirectory).value / "includes.html")) val groupsSettings = Seq(Compile / paradoxGroups := Map("Language" -> Seq("Scala", "Java"))) @@ -95,6 +95,6 @@ object Paradox { ApidocPlugin.autoImport.apidocRootPackage := "akka", publishRsyncArtifacts += { val releaseVersion = if (isSnapshot.value) "snapshot" else version.value - ((Compile / paradox).value -> s"www/docs/akka/$releaseVersion") + (Compile / paradox).value -> s"www/docs/akka/$releaseVersion" }) } diff --git a/project/Protobuf.scala b/project/Protobuf.scala index 484447fc02..d6d19d2dc1 100644 --- a/project/Protobuf.scala +++ b/project/Protobuf.scala @@ -53,7 +53,7 @@ object Protobuf { val targets = target.value val cache = targets / "protoc" / "cache" - (sourceDirs.zip(targetDirs)).map { + sourceDirs.zip(targetDirs).map { case (src, dst) => val relative = src .relativeTo(sources) @@ -68,8 +68,8 @@ object Protobuf { _ => true, transformFile( _.replace("com.google.protobuf", "akka.protobufv3.internal") - // this is the one thing that protobufGenerate doesn't fully qualify and causes - // api doc generation to fail + // this is the one thing that protobufGenerate doesn't fully qualify and causes + // api doc generation to fail .replace( "UnusedPrivateParameter", "akka.protobufv3.internal.GeneratedMessageV3.UnusedPrivateParameter")), @@ -89,9 +89,10 @@ object Protobuf { } private def checkProtocVersion(protoc: String, protocVersion: String, log: Logger): Unit = { - val res = callProtoc(protoc, Seq("--version"), log, { (p, l) => - p !! l - }) + val res = callProtoc(protoc, Seq("--version"), log, + { (p, l) => + p !! l + }) val version = res.split(" ").last.trim if (version != protocVersion) { sys.error("Wrong protoc version! Expected %s but got %s".format(protocVersion, version)) @@ -120,7 +121,8 @@ object Protobuf { protoc, Seq("-I" + srcDir.absolutePath, "--java_out=%s".format(targetDir.absolutePath)) ++ protoPathArg ++ protoFiles.map(_.absolutePath), - log, { (p, l) => + log, + { (p, l) => p ! 
l }) if (exitCode != 0) @@ -157,7 +159,7 @@ object Protobuf { updated } else Set.empty } - val sources = (sourceDir.allPaths).get.toSet + val sources = sourceDir.allPaths.get.toSet runTransform(sources) targetDir } diff --git a/project/Publish.scala b/project/Publish.scala index 04232058f4..12968383d1 100644 --- a/project/Publish.scala +++ b/project/Publish.scala @@ -25,11 +25,11 @@ object Publish extends AutoPlugin { sonatypeProfileName := "com.typesafe", startYear := Some(2009), developers := List( - Developer( - "akka-contributors", - "Akka Contributors", - "akka.official@gmail.com", - url("https://github.com/akka/akka/graphs/contributors"))), + Developer( + "akka-contributors", + "Akka Contributors", + "akka.official@gmail.com", + url("https://github.com/akka/akka/graphs/contributors"))), publishMavenStyle := true, pomIncludeRepository := { x => false diff --git a/project/SbtMultiJvm.scala b/project/SbtMultiJvm.scala index a427ac4977..ad236a8bcd 100644 --- a/project/SbtMultiJvm.scala +++ b/project/SbtMultiJvm.scala @@ -96,10 +96,10 @@ object MultiJvmPlugin extends AutoPlugin { loadedTestFrameworks := (Test / loadedTestFrameworks).value, definedTests := Defaults.detectTests.value, multiJvmTests := collectMultiJvmTests( - definedTests.value, - multiJvmMarker.value, - (MultiJvm / testOptions).value, - streams.value.log), + definedTests.value, + multiJvmMarker.value, + (MultiJvm / testOptions).value, + streams.value.log), multiJvmTestNames := multiJvmTests.map(_.keys.toSeq).storeAs(multiJvmTestNames).triggeredBy(compile).value, multiJvmApps := collectMultiJvm(discoveredMainClasses.value, multiJvmMarker.value), multiJvmAppNames := multiJvmApps.map(_.keys.toSeq).storeAs(multiJvmAppNames).triggeredBy(compile).value, @@ -116,13 +116,13 @@ object MultiJvmPlugin extends AutoPlugin { scalatestClasspath := managedClasspath.value.filter(_.data.name.contains("scalatest")), multiRunCopiedClassLocation := new File(target.value, "multi-run-copied-libraries"), scalatestScalaOptions := scalaOptionsForScalatest( - scalatestRunner.value, - scalatestOptions.value, - fullClasspath.value, - multiRunCopiedClassLocation.value), + scalatestRunner.value, + scalatestOptions.value, + fullClasspath.value, + multiRunCopiedClassLocation.value), scalatestMultiNodeScalaOptions := scalaMultiNodeOptionsForScalatest( - scalatestRunner.value, - scalatestOptions.value), + scalatestRunner.value, + scalatestOptions.value), multiTestOptions := Options(jvmOptions.value, extraOptions.value, scalatestScalaOptions.value), multiNodeTestOptions := Options(jvmOptions.value, extraOptions.value, scalatestMultiNodeScalaOptions.value), appScalaOptions := scalaOptionsForApps(fullClasspath.value), @@ -145,10 +145,10 @@ object MultiJvmPlugin extends AutoPlugin { multiNodeHosts := Seq.empty, multiNodeHostsFileName := "multi-node-test.hosts", multiNodeProcessedHosts := processMultiNodeHosts( - multiNodeHosts.value, - multiNodeHostsFileName.value, - multiNodeJavaName.value, - streams.value), + multiNodeHosts.value, + multiNodeHostsFileName.value, + multiNodeJavaName.value, + streams.value), multiNodeTargetDirName := "multi-node-test", multiNodeJavaName := "java", // TODO there must be a way to get at keys in the tasks that I just don't get @@ -242,7 +242,8 @@ object MultiJvmPlugin extends AutoPlugin { .filter(_.isFile) .foreach(classpathFile => IO.copyFile(classpathFile, new File(multiRunCopiedClassDir, classpathFile.getName), true)) - val cp = directoryBasedClasspathEntries.absString + File.pathSeparator + multiRunCopiedClassDir.getAbsolutePath +
File.separator + "*" + val cp = + directoryBasedClasspathEntries.absString + File.pathSeparator + multiRunCopiedClassDir.getAbsolutePath + File.separator + "*" (testClass: String) => { Seq("-cp", cp, runner, "-s", testClass) ++ options } } diff --git a/project/ScalafixSupport.scala b/project/ScalafixSupport.scala index 0e8fbdb346..792468ae7c 100644 --- a/project/ScalafixSupport.scala +++ b/project/ScalafixSupport.scala @@ -40,10 +40,10 @@ trait ScalafixSupport { def updateProjectCommands(alias: String, value: String): Def.Setting[Seq[Command]] = { commands := { - commands.value.filterNot({ + commands.value.filterNot { case command: SimpleCommand => command.name == alias case _ => false - }) :+ BasicCommands.newAlias(name = alias, value = value) + } :+ BasicCommands.newAlias(name = alias, value = value) } } } diff --git a/project/SigarLoader.scala b/project/SigarLoader.scala index 3efd49a820..09db2a21ba 100644 --- a/project/SigarLoader.scala +++ b/project/SigarLoader.scala @@ -41,9 +41,9 @@ object SigarLoader { sigarFolder := target.value / "native", sigarOptions := "-javaagent:" + sigarArtifact.value + "=" + sigarFolderProperty + "=" + sigarFolder.value, // - Test / fork := true) ++ (// Invoke Sigar agent at JVM init time, to extract and load native Sigar library. - if (sigarTestEnabled) Seq(Test / javaOptions += sigarOptions.value) - else Seq()) + Test / fork := true) ++ ( // Invoke Sigar agent at JVM init time, to extract and load native Sigar library. + if (sigarTestEnabled) Seq(Test / javaOptions += sigarOptions.value) + else Seq()) } } diff --git a/project/StreamOperatorsIndexGenerator.scala b/project/StreamOperatorsIndexGenerator.scala index ae90f1ea50..09600449d4 100644 --- a/project/StreamOperatorsIndexGenerator.scala +++ b/project/StreamOperatorsIndexGenerator.scala @@ -74,7 +74,7 @@ object StreamOperatorsIndexGenerator extends AutoPlugin { // FIXME document these methods as well val pendingTestCases = Map( - "Source" -> (pendingSourceOrFlow), + "Source" -> pendingSourceOrFlow, "Flow" -> (pendingSourceOrFlow ++ Seq( "lazyInit", "fromProcessorMat", @@ -199,18 +199,17 @@ object StreamOperatorsIndexGenerator extends AutoPlugin { category -> (element, method, md, description) } .groupBy(_._1) - .mapValues( - lines => - "| |Operator|Description|\n" ++ // TODO mini images here too - "|--|--|--|\n" ++ - lines - .map(_._2) - .sortBy(_._2) - .map { - case (element, method, md, description) => - s"""|$element|@ref[${methodToShow(method)}]($md)|$description|""" - } - .mkString("\n")) + .mapValues(lines => + "| |Operator|Description|\n" ++ // TODO mini images here too + "|--|--|--|\n" ++ + lines + .map(_._2) + .sortBy(_._2) + .map { + case (element, method, md, description) => + s"""|$element|@ref[${methodToShow(method)}]($md)|$description|""" + } + .mkString("\n")) val tables = categories .map { category => diff --git a/project/TestExtras.scala b/project/TestExtras.scala index ac1b442c38..76f714ff8d 100644 --- a/project/TestExtras.scala +++ b/project/TestExtras.scala @@ -39,7 +39,7 @@ object TestExtras { onlyTestTags := Params.testTagsOnly, // add filters for tests excluded by name Test / testOptions ++= excludeTestNames.value.toSeq.map(exclude => - Tests.Filter(test => !test.contains(exclude))), + Tests.Filter(test => !test.contains(exclude))), // add arguments for tests excluded by tag Test / testOptions ++= { val tags = excludeTestTags.value diff --git a/project/ValidatePullRequest.scala b/project/ValidatePullRequest.scala index 4e7bfbbaee..e4c4630a84 100644 --- 
a/project/ValidatePullRequest.scala +++ b/project/ValidatePullRequest.scala @@ -32,11 +32,11 @@ object AkkaValidatePullRequest extends AutoPlugin { val additionalTasks = settingKey[Seq[TaskKey[_]]]("Additional tasks for pull request validation") override lazy val globalSettings = Seq(credentials ++= { - // todo this should probably be supplied properly - GitHub.envTokenOrThrow.map { token => - Credentials("GitHub API", "api.github.com", "", token) - } - }, additionalTasks := Seq.empty) + // todo this should probably be supplied properly + GitHub.envTokenOrThrow.map { token => + Credentials("GitHub API", "api.github.com", "", token) + } + }, additionalTasks := Seq.empty) override lazy val buildSettings = Seq( validatePullRequest / includeFilter := PathGlobFilter("akka-*/**"), @@ -45,16 +45,16 @@ object AkkaValidatePullRequest extends AutoPlugin { prValidatorTargetBranch := "origin/main") override lazy val projectSettings = inConfig(ValidatePR)(Defaults.testTasks) ++ Seq( - ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "performance"), - ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "long-running"), - ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "timing"), - ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "gh-exclude"), - // make it fork just like regular test running - ValidatePR / fork := (Test / fork).value, - ValidatePR / testGrouping := (Test / testGrouping).value, - ValidatePR / javaOptions := (Test / javaOptions).value, - prValidatorTasks := Seq(ValidatePR / test) ++ additionalTasks.value, - prValidatorEnforcedBuildAllTasks := Seq(Test / test) ++ additionalTasks.value) + ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "performance"), + ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "long-running"), + ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "timing"), + ValidatePR / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-l", "gh-exclude"), + // make it fork just like regular test running + ValidatePR / fork := (Test / fork).value, + ValidatePR / testGrouping := (Test / testGrouping).value, + ValidatePR / javaOptions := (Test / javaOptions).value, + prValidatorTasks := Seq(ValidatePR / test) ++ additionalTasks.value, + prValidatorEnforcedBuildAllTasks := Seq(Test / test) ++ additionalTasks.value) } /**